Commit 79c65f3f authored by Evan Quan's avatar Evan Quan Committed by Alex Deucher

drm/amd/pm: do not expose power implementation details to amdgpu_pm.c

amdgpu_pm.c holds all the user sysfs/hwmon interfaces. It's another
client of our power APIs. It's not proper for it to reach into power
implementation details there.
Signed-off-by: default avatarEvan Quan <evan.quan@amd.com>
Reviewed-by: default avatarLijo Lazar <lijo.lazar@amd.com>
Signed-off-by: default avatarAlex Deucher <alexander.deucher@amd.com>
parent bc143d8b
...@@ -1453,7 +1453,9 @@ static void amdgpu_dpm_change_power_state_locked(struct amdgpu_device *adev) ...@@ -1453,7 +1453,9 @@ static void amdgpu_dpm_change_power_state_locked(struct amdgpu_device *adev)
if (equal) if (equal)
return; return;
amdgpu_dpm_set_power_state(adev); if (adev->powerplay.pp_funcs->set_power_state)
adev->powerplay.pp_funcs->set_power_state(adev->powerplay.pp_handle);
amdgpu_dpm_post_set_power_state(adev); amdgpu_dpm_post_set_power_state(adev);
adev->pm.dpm.current_active_crtcs = adev->pm.dpm.new_active_crtcs; adev->pm.dpm.current_active_crtcs = adev->pm.dpm.new_active_crtcs;
...@@ -1704,3 +1706,460 @@ int amdgpu_dpm_get_ecc_info(struct amdgpu_device *adev, ...@@ -1704,3 +1706,460 @@ int amdgpu_dpm_get_ecc_info(struct amdgpu_device *adev,
return smu_get_ecc_info(&adev->smu, umc_ecc); return smu_get_ecc_info(&adev->smu, umc_ecc);
} }
/* Return the VCE clock state table entry at @idx, or NULL when the
 * backend does not implement the query. */
struct amd_vce_state *amdgpu_dpm_get_vce_clock_state(struct amdgpu_device *adev,
						     uint32_t idx)
{
	const struct amd_pm_funcs *funcs = adev->powerplay.pp_funcs;

	return funcs->get_vce_clock_state ?
	       funcs->get_vce_clock_state(adev->powerplay.pp_handle, idx) :
	       NULL;
}
void amdgpu_dpm_get_current_power_state(struct amdgpu_device *adev,
enum amd_pm_state_type *state)
{
const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
if (!pp_funcs->get_current_power_state) {
*state = adev->pm.dpm.user_state;
return;
}
*state = pp_funcs->get_current_power_state(adev->powerplay.pp_handle);
if (*state < POWER_STATE_TYPE_DEFAULT ||
*state > POWER_STATE_TYPE_INTERNAL_3DPERF)
*state = adev->pm.dpm.user_state;
}
/* Record a user-requested power state and push it to the backend. */
void amdgpu_dpm_set_power_state(struct amdgpu_device *adev,
				enum amd_pm_state_type state)
{
	int ret;

	/* Remember the request regardless of backend type. */
	adev->pm.dpm.user_state = state;

	/* SMU-based parts pick the new state up from user_state directly. */
	if (is_support_sw_smu(adev))
		return;

	/* Powerplay path: dispatch to the backend; when the dispatch hook
	 * is not implemented, recompute clocks ourselves. */
	ret = amdgpu_dpm_dispatch_task(adev,
				       AMD_PP_TASK_ENABLE_USER_STATE,
				       &state);
	if (ret == -EOPNOTSUPP)
		amdgpu_pm_compute_clocks(adev);
}
/* Query the current forced performance level, falling back to the
 * cached value when the backend has no query callback. */
enum amd_dpm_forced_level amdgpu_dpm_get_performance_level(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *funcs = adev->powerplay.pp_funcs;

	if (!funcs->get_performance_level)
		return adev->pm.dpm.forced_level;

	return funcs->get_performance_level(adev->powerplay.pp_handle);
}
/* Force a performance level.  Silently succeeds when the backend does
 * not implement the callback; refuses while thermal protection is
 * active.  Returns 0 on success, -EINVAL on failure. */
int amdgpu_dpm_force_performance_level(struct amdgpu_device *adev,
				       enum amd_dpm_forced_level level)
{
	const struct amd_pm_funcs *funcs = adev->powerplay.pp_funcs;

	if (!funcs->force_performance_level)
		return 0;

	/* Thermal protection overrides any user request. */
	if (adev->pm.dpm.thermal_active)
		return -EINVAL;

	if (funcs->force_performance_level(adev->powerplay.pp_handle, level))
		return -EINVAL;

	adev->pm.dpm.forced_level = level;

	return 0;
}
/* Fill @states with the supported power-state list; -EOPNOTSUPP when
 * the backend does not implement the query. */
int amdgpu_dpm_get_pp_num_states(struct amdgpu_device *adev,
				 struct pp_states_info *states)
{
	const struct amd_pm_funcs *funcs = adev->powerplay.pp_funcs;
	void *handle = adev->powerplay.pp_handle;

	return funcs->get_pp_num_states ?
	       funcs->get_pp_num_states(handle, states) : -EOPNOTSUPP;
}
/* Forward a powerplay task to the backend dispatcher; -EOPNOTSUPP when
 * no dispatcher is implemented (callers use this to fall back). */
int amdgpu_dpm_dispatch_task(struct amdgpu_device *adev,
			     enum amd_pp_task task_id,
			     enum amd_pm_state_type *user_state)
{
	const struct amd_pm_funcs *funcs = adev->powerplay.pp_funcs;
	void *handle = adev->powerplay.pp_handle;

	return funcs->dispatch_tasks ?
	       funcs->dispatch_tasks(handle, task_id, user_state) :
	       -EOPNOTSUPP;
}
/* Fetch the powerplay table into *@table; returns its size in bytes,
 * or 0 when the backend does not implement the query. */
int amdgpu_dpm_get_pp_table(struct amdgpu_device *adev, char **table)
{
	const struct amd_pm_funcs *funcs = adev->powerplay.pp_funcs;

	return funcs->get_pp_table ?
	       funcs->get_pp_table(adev->powerplay.pp_handle, table) : 0;
}
/* Pass a fine-grain clock/voltage edit request through to the backend;
 * treated as success when the callback is not implemented. */
int amdgpu_dpm_set_fine_grain_clk_vol(struct amdgpu_device *adev,
				      uint32_t type,
				      long *input,
				      uint32_t size)
{
	const struct amd_pm_funcs *funcs = adev->powerplay.pp_funcs;
	void *handle = adev->powerplay.pp_handle;

	if (!funcs->set_fine_grain_clk_vol)
		return 0;

	return funcs->set_fine_grain_clk_vol(handle, type, input, size);
}
/* Pass an overdrive (ODN) DPM table edit through to the backend;
 * treated as success when the callback is not implemented. */
int amdgpu_dpm_odn_edit_dpm_table(struct amdgpu_device *adev,
				  uint32_t type,
				  long *input,
				  uint32_t size)
{
	const struct amd_pm_funcs *funcs = adev->powerplay.pp_funcs;
	void *handle = adev->powerplay.pp_handle;

	if (!funcs->odn_edit_dpm_table)
		return 0;

	return funcs->odn_edit_dpm_table(handle, type, input, size);
}
/* Print the clock levels of @type into @buf; returns the number of
 * bytes written, or 0 when the backend has no print callback. */
int amdgpu_dpm_print_clock_levels(struct amdgpu_device *adev,
				  enum pp_clock_type type,
				  char *buf)
{
	const struct amd_pm_funcs *funcs = adev->powerplay.pp_funcs;
	void *handle = adev->powerplay.pp_handle;

	return funcs->print_clock_levels ?
	       funcs->print_clock_levels(handle, type, buf) : 0;
}
/* Apply a ppfeature enable/disable mask; treated as success when the
 * backend does not implement the callback. */
int amdgpu_dpm_set_ppfeature_status(struct amdgpu_device *adev,
				    uint64_t ppfeature_masks)
{
	const struct amd_pm_funcs *funcs = adev->powerplay.pp_funcs;
	void *handle = adev->powerplay.pp_handle;

	return funcs->set_ppfeature_status ?
	       funcs->set_ppfeature_status(handle, ppfeature_masks) : 0;
}
/* Print the ppfeature status into @buf; returns the number of bytes
 * written, or 0 when the backend has no query callback. */
int amdgpu_dpm_get_ppfeature_status(struct amdgpu_device *adev, char *buf)
{
	const struct amd_pm_funcs *funcs = adev->powerplay.pp_funcs;

	return funcs->get_ppfeature_status ?
	       funcs->get_ppfeature_status(adev->powerplay.pp_handle, buf) :
	       0;
}
/* Restrict the levels of clock @type to @mask; treated as success when
 * the backend does not implement the callback. */
int amdgpu_dpm_force_clock_level(struct amdgpu_device *adev,
				 enum pp_clock_type type,
				 uint32_t mask)
{
	const struct amd_pm_funcs *funcs = adev->powerplay.pp_funcs;
	void *handle = adev->powerplay.pp_handle;

	return funcs->force_clock_level ?
	       funcs->force_clock_level(handle, type, mask) : 0;
}
/* Return the sclk overdrive percentage, or 0 when unsupported. */
int amdgpu_dpm_get_sclk_od(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *funcs = adev->powerplay.pp_funcs;

	return funcs->get_sclk_od ?
	       funcs->get_sclk_od(adev->powerplay.pp_handle) : 0;
}
/* Set the sclk overdrive percentage and readjust the power state. */
int amdgpu_dpm_set_sclk_od(struct amdgpu_device *adev, uint32_t value)
{
	const struct amd_pm_funcs *funcs = adev->powerplay.pp_funcs;

	/* No-op on SMU-based parts. */
	if (is_support_sw_smu(adev))
		return 0;

	if (funcs->set_sclk_od)
		funcs->set_sclk_od(adev->powerplay.pp_handle, value);

	/* Without a dispatch hook, fall back to recomputing clocks from
	 * the boot power state. */
	if (amdgpu_dpm_dispatch_task(adev,
				     AMD_PP_TASK_READJUST_POWER_STATE,
				     NULL) == -EOPNOTSUPP) {
		adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps;
		amdgpu_pm_compute_clocks(adev);
	}

	return 0;
}
/* Return the mclk overdrive percentage, or 0 when unsupported. */
int amdgpu_dpm_get_mclk_od(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *funcs = adev->powerplay.pp_funcs;

	return funcs->get_mclk_od ?
	       funcs->get_mclk_od(adev->powerplay.pp_handle) : 0;
}
/* Set the mclk overdrive percentage and readjust the power state. */
int amdgpu_dpm_set_mclk_od(struct amdgpu_device *adev, uint32_t value)
{
	const struct amd_pm_funcs *funcs = adev->powerplay.pp_funcs;

	/* No-op on SMU-based parts. */
	if (is_support_sw_smu(adev))
		return 0;

	if (funcs->set_mclk_od)
		funcs->set_mclk_od(adev->powerplay.pp_handle, value);

	/* Without a dispatch hook, fall back to recomputing clocks from
	 * the boot power state. */
	if (amdgpu_dpm_dispatch_task(adev,
				     AMD_PP_TASK_READJUST_POWER_STATE,
				     NULL) == -EOPNOTSUPP) {
		adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps;
		amdgpu_pm_compute_clocks(adev);
	}

	return 0;
}
/* Print the power profile modes into @buf; -EOPNOTSUPP when the
 * backend does not implement the query (callers probe with NULL). */
int amdgpu_dpm_get_power_profile_mode(struct amdgpu_device *adev,
				      char *buf)
{
	const struct amd_pm_funcs *funcs = adev->powerplay.pp_funcs;
	void *handle = adev->powerplay.pp_handle;

	return funcs->get_power_profile_mode ?
	       funcs->get_power_profile_mode(handle, buf) : -EOPNOTSUPP;
}
/* Apply a power profile mode; treated as success when the backend
 * does not implement the callback. */
int amdgpu_dpm_set_power_profile_mode(struct amdgpu_device *adev,
				      long *input, uint32_t size)
{
	const struct amd_pm_funcs *funcs = adev->powerplay.pp_funcs;
	void *handle = adev->powerplay.pp_handle;

	return funcs->set_power_profile_mode ?
	       funcs->set_power_profile_mode(handle, input, size) : 0;
}
/* Fetch the GPU metrics table into *@table; returns its size in bytes,
 * or 0 when the backend does not implement the query. */
int amdgpu_dpm_get_gpu_metrics(struct amdgpu_device *adev, void **table)
{
	const struct amd_pm_funcs *funcs = adev->powerplay.pp_funcs;

	return funcs->get_gpu_metrics ?
	       funcs->get_gpu_metrics(adev->powerplay.pp_handle, table) : 0;
}
/* Report the fan control mode into @fan_mode; -EOPNOTSUPP when the
 * backend does not implement the query. */
int amdgpu_dpm_get_fan_control_mode(struct amdgpu_device *adev,
				    uint32_t *fan_mode)
{
	const struct amd_pm_funcs *funcs = adev->powerplay.pp_funcs;
	void *handle = adev->powerplay.pp_handle;

	if (!funcs->get_fan_control_mode)
		return -EOPNOTSUPP;

	*fan_mode = funcs->get_fan_control_mode(handle);

	return 0;
}
/* Set the fan speed as a PWM value; -EINVAL when unsupported. */
int amdgpu_dpm_set_fan_speed_pwm(struct amdgpu_device *adev,
				 uint32_t speed)
{
	const struct amd_pm_funcs *funcs = adev->powerplay.pp_funcs;

	return funcs->set_fan_speed_pwm ?
	       funcs->set_fan_speed_pwm(adev->powerplay.pp_handle, speed) :
	       -EINVAL;
}
/* Read the fan speed as a PWM value into @speed; -EINVAL when
 * unsupported. */
int amdgpu_dpm_get_fan_speed_pwm(struct amdgpu_device *adev,
				 uint32_t *speed)
{
	const struct amd_pm_funcs *funcs = adev->powerplay.pp_funcs;

	return funcs->get_fan_speed_pwm ?
	       funcs->get_fan_speed_pwm(adev->powerplay.pp_handle, speed) :
	       -EINVAL;
}
/* Read the fan speed in RPM into @speed; -EINVAL when unsupported. */
int amdgpu_dpm_get_fan_speed_rpm(struct amdgpu_device *adev,
				 uint32_t *speed)
{
	const struct amd_pm_funcs *funcs = adev->powerplay.pp_funcs;

	return funcs->get_fan_speed_rpm ?
	       funcs->get_fan_speed_rpm(adev->powerplay.pp_handle, speed) :
	       -EINVAL;
}
/* Set the fan speed in RPM; -EINVAL when unsupported. */
int amdgpu_dpm_set_fan_speed_rpm(struct amdgpu_device *adev,
				 uint32_t speed)
{
	const struct amd_pm_funcs *funcs = adev->powerplay.pp_funcs;

	return funcs->set_fan_speed_rpm ?
	       funcs->set_fan_speed_rpm(adev->powerplay.pp_handle, speed) :
	       -EINVAL;
}
/* Set the fan control mode (auto/manual/...); -EOPNOTSUPP when the
 * backend does not implement the callback. */
int amdgpu_dpm_set_fan_control_mode(struct amdgpu_device *adev,
				    uint32_t mode)
{
	const struct amd_pm_funcs *funcs = adev->powerplay.pp_funcs;

	if (!funcs->set_fan_control_mode)
		return -EOPNOTSUPP;

	funcs->set_fan_control_mode(adev->powerplay.pp_handle, mode);

	return 0;
}
/* Query a power limit of the requested level/type into @limit;
 * -ENODATA when the backend does not implement the query. */
int amdgpu_dpm_get_power_limit(struct amdgpu_device *adev,
			       uint32_t *limit,
			       enum pp_power_limit_level pp_limit_level,
			       enum pp_power_type power_type)
{
	const struct amd_pm_funcs *funcs = adev->powerplay.pp_funcs;
	void *handle = adev->powerplay.pp_handle;

	if (!funcs->get_power_limit)
		return -ENODATA;

	return funcs->get_power_limit(handle, limit, pp_limit_level,
				      power_type);
}
/* Set the power limit; -EINVAL when unsupported. */
int amdgpu_dpm_set_power_limit(struct amdgpu_device *adev,
			       uint32_t limit)
{
	const struct amd_pm_funcs *funcs = adev->powerplay.pp_funcs;

	return funcs->set_power_limit ?
	       funcs->set_power_limit(adev->powerplay.pp_handle, limit) :
	       -EINVAL;
}
/* True when CPU-clock (cclk) DPM is available; only possible on
 * SMU-based parts. */
int amdgpu_dpm_is_cclk_dpm_supported(struct amdgpu_device *adev)
{
	return is_support_sw_smu(adev) && is_support_cclk_dpm(adev);
}
/* Dump the current performance level into the debugfs seq_file;
 * -EOPNOTSUPP when the backend has no print callback. */
int amdgpu_dpm_debugfs_print_current_performance_level(struct amdgpu_device *adev,
						       struct seq_file *m)
{
	const struct amd_pm_funcs *funcs = adev->powerplay.pp_funcs;
	void *handle = adev->powerplay.pp_handle;

	if (!funcs->debugfs_print_current_performance_level)
		return -EOPNOTSUPP;

	funcs->debugfs_print_current_performance_level(handle, m);

	return 0;
}
/* Report the SMU private buffer address and size; -ENOSYS when the
 * backend does not implement the query. */
int amdgpu_dpm_get_smu_prv_buf_details(struct amdgpu_device *adev,
				       void **addr,
				       size_t *size)
{
	const struct amd_pm_funcs *funcs = adev->powerplay.pp_funcs;
	void *handle = adev->powerplay.pp_handle;

	if (!funcs->get_smu_prv_buf_details)
		return -ENOSYS;

	return funcs->get_smu_prv_buf_details(handle, addr, size);
}
/* True when overdrive (manual clock/voltage tuning) is available. */
int amdgpu_dpm_is_overdrive_supported(struct amdgpu_device *adev)
{
	if (is_support_sw_smu(adev))
		return adev->smu.od_enabled || adev->smu.is_apu;

	/* Powerplay path: pp_handle is a hwmgr. */
	return ((struct pp_hwmgr *)adev->powerplay.pp_handle)->od_enabled ?
	       true : false;
}
/* Upload a user-supplied powerplay table; -EOPNOTSUPP when the backend
 * does not implement the callback. */
int amdgpu_dpm_set_pp_table(struct amdgpu_device *adev,
			    const char *buf,
			    size_t size)
{
	const struct amd_pm_funcs *funcs = adev->powerplay.pp_funcs;
	void *handle = adev->powerplay.pp_handle;

	if (!funcs->set_pp_table)
		return -EOPNOTSUPP;

	return funcs->set_pp_table(handle, buf, size);
}
/* Return the CPU core count cached in the SMU state. */
int amdgpu_dpm_get_num_cpu_cores(struct amdgpu_device *adev)
{
	return adev->smu.cpu_core_num;
}
/* Create the SMU Trace Buffer debugfs entries; no-op on non-SMU
 * (powerplay) parts. */
void amdgpu_dpm_stb_debug_fs_init(struct amdgpu_device *adev)
{
	if (is_support_sw_smu(adev))
		amdgpu_smu_stb_debug_fs_init(adev);
}
...@@ -34,7 +34,6 @@ ...@@ -34,7 +34,6 @@
#include <linux/nospec.h> #include <linux/nospec.h>
#include <linux/pm_runtime.h> #include <linux/pm_runtime.h>
#include <asm/processor.h> #include <asm/processor.h>
#include "hwmgr.h"
static const struct cg_flag_name clocks[] = { static const struct cg_flag_name clocks[] = {
{AMD_CG_SUPPORT_GFX_FGCG, "Graphics Fine Grain Clock Gating"}, {AMD_CG_SUPPORT_GFX_FGCG, "Graphics Fine Grain Clock Gating"},
...@@ -132,7 +131,6 @@ static ssize_t amdgpu_get_power_dpm_state(struct device *dev, ...@@ -132,7 +131,6 @@ static ssize_t amdgpu_get_power_dpm_state(struct device *dev,
{ {
struct drm_device *ddev = dev_get_drvdata(dev); struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = drm_to_adev(ddev); struct amdgpu_device *adev = drm_to_adev(ddev);
const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
enum amd_pm_state_type pm; enum amd_pm_state_type pm;
int ret; int ret;
...@@ -147,11 +145,7 @@ static ssize_t amdgpu_get_power_dpm_state(struct device *dev, ...@@ -147,11 +145,7 @@ static ssize_t amdgpu_get_power_dpm_state(struct device *dev,
return ret; return ret;
} }
if (pp_funcs->get_current_power_state) { amdgpu_dpm_get_current_power_state(adev, &pm);
pm = amdgpu_dpm_get_current_power_state(adev);
} else {
pm = adev->pm.dpm.user_state;
}
pm_runtime_mark_last_busy(ddev->dev); pm_runtime_mark_last_busy(ddev->dev);
pm_runtime_put_autosuspend(ddev->dev); pm_runtime_put_autosuspend(ddev->dev);
...@@ -191,19 +185,8 @@ static ssize_t amdgpu_set_power_dpm_state(struct device *dev, ...@@ -191,19 +185,8 @@ static ssize_t amdgpu_set_power_dpm_state(struct device *dev,
return ret; return ret;
} }
if (is_support_sw_smu(adev)) { amdgpu_dpm_set_power_state(adev, state);
mutex_lock(&adev->pm.mutex);
adev->pm.dpm.user_state = state;
mutex_unlock(&adev->pm.mutex);
} else if (adev->powerplay.pp_funcs->dispatch_tasks) {
amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_ENABLE_USER_STATE, &state);
} else {
mutex_lock(&adev->pm.mutex);
adev->pm.dpm.user_state = state;
mutex_unlock(&adev->pm.mutex);
amdgpu_pm_compute_clocks(adev);
}
pm_runtime_mark_last_busy(ddev->dev); pm_runtime_mark_last_busy(ddev->dev);
pm_runtime_put_autosuspend(ddev->dev); pm_runtime_put_autosuspend(ddev->dev);
...@@ -290,10 +273,7 @@ static ssize_t amdgpu_get_power_dpm_force_performance_level(struct device *dev, ...@@ -290,10 +273,7 @@ static ssize_t amdgpu_get_power_dpm_force_performance_level(struct device *dev,
return ret; return ret;
} }
if (adev->powerplay.pp_funcs->get_performance_level) level = amdgpu_dpm_get_performance_level(adev);
level = amdgpu_dpm_get_performance_level(adev);
else
level = adev->pm.dpm.forced_level;
pm_runtime_mark_last_busy(ddev->dev); pm_runtime_mark_last_busy(ddev->dev);
pm_runtime_put_autosuspend(ddev->dev); pm_runtime_put_autosuspend(ddev->dev);
...@@ -318,7 +298,6 @@ static ssize_t amdgpu_set_power_dpm_force_performance_level(struct device *dev, ...@@ -318,7 +298,6 @@ static ssize_t amdgpu_set_power_dpm_force_performance_level(struct device *dev,
{ {
struct drm_device *ddev = dev_get_drvdata(dev); struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = drm_to_adev(ddev); struct amdgpu_device *adev = drm_to_adev(ddev);
const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
enum amd_dpm_forced_level level; enum amd_dpm_forced_level level;
enum amd_dpm_forced_level current_level; enum amd_dpm_forced_level current_level;
int ret = 0; int ret = 0;
...@@ -358,11 +337,7 @@ static ssize_t amdgpu_set_power_dpm_force_performance_level(struct device *dev, ...@@ -358,11 +337,7 @@ static ssize_t amdgpu_set_power_dpm_force_performance_level(struct device *dev,
return ret; return ret;
} }
if (pp_funcs->get_performance_level) current_level = amdgpu_dpm_get_performance_level(adev);
current_level = amdgpu_dpm_get_performance_level(adev);
else
current_level = adev->pm.dpm.forced_level;
if (current_level == level) { if (current_level == level) {
pm_runtime_mark_last_busy(ddev->dev); pm_runtime_mark_last_busy(ddev->dev);
pm_runtime_put_autosuspend(ddev->dev); pm_runtime_put_autosuspend(ddev->dev);
...@@ -390,25 +365,12 @@ static ssize_t amdgpu_set_power_dpm_force_performance_level(struct device *dev, ...@@ -390,25 +365,12 @@ static ssize_t amdgpu_set_power_dpm_force_performance_level(struct device *dev,
return -EINVAL; return -EINVAL;
} }
if (pp_funcs->force_performance_level) { if (amdgpu_dpm_force_performance_level(adev, level)) {
mutex_lock(&adev->pm.mutex); pm_runtime_mark_last_busy(ddev->dev);
if (adev->pm.dpm.thermal_active) { pm_runtime_put_autosuspend(ddev->dev);
mutex_unlock(&adev->pm.mutex); return -EINVAL;
pm_runtime_mark_last_busy(ddev->dev);
pm_runtime_put_autosuspend(ddev->dev);
return -EINVAL;
}
ret = amdgpu_dpm_force_performance_level(adev, level);
if (ret) {
mutex_unlock(&adev->pm.mutex);
pm_runtime_mark_last_busy(ddev->dev);
pm_runtime_put_autosuspend(ddev->dev);
return -EINVAL;
} else {
adev->pm.dpm.forced_level = level;
}
mutex_unlock(&adev->pm.mutex);
} }
pm_runtime_mark_last_busy(ddev->dev); pm_runtime_mark_last_busy(ddev->dev);
pm_runtime_put_autosuspend(ddev->dev); pm_runtime_put_autosuspend(ddev->dev);
...@@ -421,7 +383,6 @@ static ssize_t amdgpu_get_pp_num_states(struct device *dev, ...@@ -421,7 +383,6 @@ static ssize_t amdgpu_get_pp_num_states(struct device *dev,
{ {
struct drm_device *ddev = dev_get_drvdata(dev); struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = drm_to_adev(ddev); struct amdgpu_device *adev = drm_to_adev(ddev);
const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
struct pp_states_info data; struct pp_states_info data;
uint32_t i; uint32_t i;
int buf_len, ret; int buf_len, ret;
...@@ -437,11 +398,8 @@ static ssize_t amdgpu_get_pp_num_states(struct device *dev, ...@@ -437,11 +398,8 @@ static ssize_t amdgpu_get_pp_num_states(struct device *dev,
return ret; return ret;
} }
if (pp_funcs->get_pp_num_states) { if (amdgpu_dpm_get_pp_num_states(adev, &data))
amdgpu_dpm_get_pp_num_states(adev, &data);
} else {
memset(&data, 0, sizeof(data)); memset(&data, 0, sizeof(data));
}
pm_runtime_mark_last_busy(ddev->dev); pm_runtime_mark_last_busy(ddev->dev);
pm_runtime_put_autosuspend(ddev->dev); pm_runtime_put_autosuspend(ddev->dev);
...@@ -463,7 +421,6 @@ static ssize_t amdgpu_get_pp_cur_state(struct device *dev, ...@@ -463,7 +421,6 @@ static ssize_t amdgpu_get_pp_cur_state(struct device *dev,
{ {
struct drm_device *ddev = dev_get_drvdata(dev); struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = drm_to_adev(ddev); struct amdgpu_device *adev = drm_to_adev(ddev);
const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
struct pp_states_info data = {0}; struct pp_states_info data = {0};
enum amd_pm_state_type pm = 0; enum amd_pm_state_type pm = 0;
int i = 0, ret = 0; int i = 0, ret = 0;
...@@ -479,15 +436,16 @@ static ssize_t amdgpu_get_pp_cur_state(struct device *dev, ...@@ -479,15 +436,16 @@ static ssize_t amdgpu_get_pp_cur_state(struct device *dev,
return ret; return ret;
} }
if (pp_funcs->get_current_power_state amdgpu_dpm_get_current_power_state(adev, &pm);
&& pp_funcs->get_pp_num_states) {
pm = amdgpu_dpm_get_current_power_state(adev); ret = amdgpu_dpm_get_pp_num_states(adev, &data);
amdgpu_dpm_get_pp_num_states(adev, &data);
}
pm_runtime_mark_last_busy(ddev->dev); pm_runtime_mark_last_busy(ddev->dev);
pm_runtime_put_autosuspend(ddev->dev); pm_runtime_put_autosuspend(ddev->dev);
if (ret)
return ret;
for (i = 0; i < data.nums; i++) { for (i = 0; i < data.nums; i++) {
if (pm == data.states[i]) if (pm == data.states[i])
break; break;
...@@ -525,6 +483,7 @@ static ssize_t amdgpu_set_pp_force_state(struct device *dev, ...@@ -525,6 +483,7 @@ static ssize_t amdgpu_set_pp_force_state(struct device *dev,
struct drm_device *ddev = dev_get_drvdata(dev); struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = drm_to_adev(ddev); struct amdgpu_device *adev = drm_to_adev(ddev);
enum amd_pm_state_type state = 0; enum amd_pm_state_type state = 0;
struct pp_states_info data;
unsigned long idx; unsigned long idx;
int ret; int ret;
...@@ -533,41 +492,49 @@ static ssize_t amdgpu_set_pp_force_state(struct device *dev, ...@@ -533,41 +492,49 @@ static ssize_t amdgpu_set_pp_force_state(struct device *dev,
if (adev->in_suspend && !adev->in_runpm) if (adev->in_suspend && !adev->in_runpm)
return -EPERM; return -EPERM;
if (strlen(buf) == 1) adev->pp_force_state_enabled = false;
adev->pp_force_state_enabled = false;
else if (is_support_sw_smu(adev))
adev->pp_force_state_enabled = false;
else if (adev->powerplay.pp_funcs->dispatch_tasks &&
adev->powerplay.pp_funcs->get_pp_num_states) {
struct pp_states_info data;
ret = kstrtoul(buf, 0, &idx);
if (ret || idx >= ARRAY_SIZE(data.states))
return -EINVAL;
idx = array_index_nospec(idx, ARRAY_SIZE(data.states)); if (strlen(buf) == 1)
return count;
amdgpu_dpm_get_pp_num_states(adev, &data); ret = kstrtoul(buf, 0, &idx);
state = data.states[idx]; if (ret || idx >= ARRAY_SIZE(data.states))
return -EINVAL;
ret = pm_runtime_get_sync(ddev->dev); idx = array_index_nospec(idx, ARRAY_SIZE(data.states));
if (ret < 0) {
pm_runtime_put_autosuspend(ddev->dev);
return ret;
}
/* only set user selected power states */ ret = pm_runtime_get_sync(ddev->dev);
if (state != POWER_STATE_TYPE_INTERNAL_BOOT && if (ret < 0) {
state != POWER_STATE_TYPE_DEFAULT) {
amdgpu_dpm_dispatch_task(adev,
AMD_PP_TASK_ENABLE_USER_STATE, &state);
adev->pp_force_state_enabled = true;
}
pm_runtime_mark_last_busy(ddev->dev);
pm_runtime_put_autosuspend(ddev->dev); pm_runtime_put_autosuspend(ddev->dev);
return ret;
}
ret = amdgpu_dpm_get_pp_num_states(adev, &data);
if (ret)
goto err_out;
state = data.states[idx];
/* only set user selected power states */
if (state != POWER_STATE_TYPE_INTERNAL_BOOT &&
state != POWER_STATE_TYPE_DEFAULT) {
ret = amdgpu_dpm_dispatch_task(adev,
AMD_PP_TASK_ENABLE_USER_STATE, &state);
if (ret)
goto err_out;
adev->pp_force_state_enabled = true;
} }
pm_runtime_mark_last_busy(ddev->dev);
pm_runtime_put_autosuspend(ddev->dev);
return count; return count;
err_out:
pm_runtime_mark_last_busy(ddev->dev);
pm_runtime_put_autosuspend(ddev->dev);
return ret;
} }
/** /**
...@@ -601,17 +568,13 @@ static ssize_t amdgpu_get_pp_table(struct device *dev, ...@@ -601,17 +568,13 @@ static ssize_t amdgpu_get_pp_table(struct device *dev,
return ret; return ret;
} }
if (adev->powerplay.pp_funcs->get_pp_table) { size = amdgpu_dpm_get_pp_table(adev, &table);
size = amdgpu_dpm_get_pp_table(adev, &table);
pm_runtime_mark_last_busy(ddev->dev); pm_runtime_mark_last_busy(ddev->dev);
pm_runtime_put_autosuspend(ddev->dev); pm_runtime_put_autosuspend(ddev->dev);
if (size < 0)
return size; if (size <= 0)
} else { return size;
pm_runtime_mark_last_busy(ddev->dev);
pm_runtime_put_autosuspend(ddev->dev);
return 0;
}
if (size >= PAGE_SIZE) if (size >= PAGE_SIZE)
size = PAGE_SIZE - 1; size = PAGE_SIZE - 1;
...@@ -642,15 +605,13 @@ static ssize_t amdgpu_set_pp_table(struct device *dev, ...@@ -642,15 +605,13 @@ static ssize_t amdgpu_set_pp_table(struct device *dev,
} }
ret = amdgpu_dpm_set_pp_table(adev, buf, count); ret = amdgpu_dpm_set_pp_table(adev, buf, count);
if (ret) {
pm_runtime_mark_last_busy(ddev->dev);
pm_runtime_put_autosuspend(ddev->dev);
return ret;
}
pm_runtime_mark_last_busy(ddev->dev); pm_runtime_mark_last_busy(ddev->dev);
pm_runtime_put_autosuspend(ddev->dev); pm_runtime_put_autosuspend(ddev->dev);
if (ret)
return ret;
return count; return count;
} }
...@@ -866,46 +827,32 @@ static ssize_t amdgpu_set_pp_od_clk_voltage(struct device *dev, ...@@ -866,46 +827,32 @@ static ssize_t amdgpu_set_pp_od_clk_voltage(struct device *dev,
return ret; return ret;
} }
if (adev->powerplay.pp_funcs->set_fine_grain_clk_vol) { if (amdgpu_dpm_set_fine_grain_clk_vol(adev,
ret = amdgpu_dpm_set_fine_grain_clk_vol(adev, type, type,
parameter, parameter,
parameter_size); parameter_size))
if (ret) { goto err_out;
pm_runtime_mark_last_busy(ddev->dev);
pm_runtime_put_autosuspend(ddev->dev);
return -EINVAL;
}
}
if (adev->powerplay.pp_funcs->odn_edit_dpm_table) { if (amdgpu_dpm_odn_edit_dpm_table(adev, type,
ret = amdgpu_dpm_odn_edit_dpm_table(adev, type, parameter, parameter_size))
parameter, parameter_size); goto err_out;
if (ret) {
pm_runtime_mark_last_busy(ddev->dev);
pm_runtime_put_autosuspend(ddev->dev);
return -EINVAL;
}
}
if (type == PP_OD_COMMIT_DPM_TABLE) { if (type == PP_OD_COMMIT_DPM_TABLE) {
if (adev->powerplay.pp_funcs->dispatch_tasks) { if (amdgpu_dpm_dispatch_task(adev,
amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_READJUST_POWER_STATE,
AMD_PP_TASK_READJUST_POWER_STATE, NULL))
NULL); goto err_out;
pm_runtime_mark_last_busy(ddev->dev);
pm_runtime_put_autosuspend(ddev->dev);
return count;
} else {
pm_runtime_mark_last_busy(ddev->dev);
pm_runtime_put_autosuspend(ddev->dev);
return -EINVAL;
}
} }
pm_runtime_mark_last_busy(ddev->dev); pm_runtime_mark_last_busy(ddev->dev);
pm_runtime_put_autosuspend(ddev->dev); pm_runtime_put_autosuspend(ddev->dev);
return count; return count;
err_out:
pm_runtime_mark_last_busy(ddev->dev);
pm_runtime_put_autosuspend(ddev->dev);
return -EINVAL;
} }
static ssize_t amdgpu_get_pp_od_clk_voltage(struct device *dev, static ssize_t amdgpu_get_pp_od_clk_voltage(struct device *dev,
...@@ -928,8 +875,8 @@ static ssize_t amdgpu_get_pp_od_clk_voltage(struct device *dev, ...@@ -928,8 +875,8 @@ static ssize_t amdgpu_get_pp_od_clk_voltage(struct device *dev,
return ret; return ret;
} }
if (adev->powerplay.pp_funcs->print_clock_levels) { size = amdgpu_dpm_print_clock_levels(adev, OD_SCLK, buf);
size = amdgpu_dpm_print_clock_levels(adev, OD_SCLK, buf); if (size > 0) {
size += amdgpu_dpm_print_clock_levels(adev, OD_MCLK, buf+size); size += amdgpu_dpm_print_clock_levels(adev, OD_MCLK, buf+size);
size += amdgpu_dpm_print_clock_levels(adev, OD_VDDC_CURVE, buf+size); size += amdgpu_dpm_print_clock_levels(adev, OD_VDDC_CURVE, buf+size);
size += amdgpu_dpm_print_clock_levels(adev, OD_VDDGFX_OFFSET, buf+size); size += amdgpu_dpm_print_clock_levels(adev, OD_VDDGFX_OFFSET, buf+size);
...@@ -985,17 +932,14 @@ static ssize_t amdgpu_set_pp_features(struct device *dev, ...@@ -985,17 +932,14 @@ static ssize_t amdgpu_set_pp_features(struct device *dev,
return ret; return ret;
} }
if (adev->powerplay.pp_funcs->set_ppfeature_status) { ret = amdgpu_dpm_set_ppfeature_status(adev, featuremask);
ret = amdgpu_dpm_set_ppfeature_status(adev, featuremask);
if (ret) {
pm_runtime_mark_last_busy(ddev->dev);
pm_runtime_put_autosuspend(ddev->dev);
return -EINVAL;
}
}
pm_runtime_mark_last_busy(ddev->dev); pm_runtime_mark_last_busy(ddev->dev);
pm_runtime_put_autosuspend(ddev->dev); pm_runtime_put_autosuspend(ddev->dev);
if (ret)
return -EINVAL;
return count; return count;
} }
...@@ -1019,9 +963,8 @@ static ssize_t amdgpu_get_pp_features(struct device *dev, ...@@ -1019,9 +963,8 @@ static ssize_t amdgpu_get_pp_features(struct device *dev,
return ret; return ret;
} }
if (adev->powerplay.pp_funcs->get_ppfeature_status) size = amdgpu_dpm_get_ppfeature_status(adev, buf);
size = amdgpu_dpm_get_ppfeature_status(adev, buf); if (size <= 0)
else
size = sysfs_emit(buf, "\n"); size = sysfs_emit(buf, "\n");
pm_runtime_mark_last_busy(ddev->dev); pm_runtime_mark_last_busy(ddev->dev);
...@@ -1080,9 +1023,8 @@ static ssize_t amdgpu_get_pp_dpm_clock(struct device *dev, ...@@ -1080,9 +1023,8 @@ static ssize_t amdgpu_get_pp_dpm_clock(struct device *dev,
return ret; return ret;
} }
if (adev->powerplay.pp_funcs->print_clock_levels) size = amdgpu_dpm_print_clock_levels(adev, type, buf);
size = amdgpu_dpm_print_clock_levels(adev, type, buf); if (size <= 0)
else
size = sysfs_emit(buf, "\n"); size = sysfs_emit(buf, "\n");
pm_runtime_mark_last_busy(ddev->dev); pm_runtime_mark_last_busy(ddev->dev);
...@@ -1151,10 +1093,7 @@ static ssize_t amdgpu_set_pp_dpm_clock(struct device *dev, ...@@ -1151,10 +1093,7 @@ static ssize_t amdgpu_set_pp_dpm_clock(struct device *dev,
return ret; return ret;
} }
if (adev->powerplay.pp_funcs->force_clock_level) ret = amdgpu_dpm_force_clock_level(adev, type, mask);
ret = amdgpu_dpm_force_clock_level(adev, type, mask);
else
ret = 0;
pm_runtime_mark_last_busy(ddev->dev); pm_runtime_mark_last_busy(ddev->dev);
pm_runtime_put_autosuspend(ddev->dev); pm_runtime_put_autosuspend(ddev->dev);
...@@ -1305,10 +1244,7 @@ static ssize_t amdgpu_get_pp_sclk_od(struct device *dev, ...@@ -1305,10 +1244,7 @@ static ssize_t amdgpu_get_pp_sclk_od(struct device *dev,
return ret; return ret;
} }
if (is_support_sw_smu(adev)) value = amdgpu_dpm_get_sclk_od(adev);
value = 0;
else if (adev->powerplay.pp_funcs->get_sclk_od)
value = amdgpu_dpm_get_sclk_od(adev);
pm_runtime_mark_last_busy(ddev->dev); pm_runtime_mark_last_busy(ddev->dev);
pm_runtime_put_autosuspend(ddev->dev); pm_runtime_put_autosuspend(ddev->dev);
...@@ -1342,19 +1278,7 @@ static ssize_t amdgpu_set_pp_sclk_od(struct device *dev, ...@@ -1342,19 +1278,7 @@ static ssize_t amdgpu_set_pp_sclk_od(struct device *dev,
return ret; return ret;
} }
if (is_support_sw_smu(adev)) { amdgpu_dpm_set_sclk_od(adev, (uint32_t)value);
value = 0;
} else {
if (adev->powerplay.pp_funcs->set_sclk_od)
amdgpu_dpm_set_sclk_od(adev, (uint32_t)value);
if (adev->powerplay.pp_funcs->dispatch_tasks) {
amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_READJUST_POWER_STATE, NULL);
} else {
adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps;
amdgpu_pm_compute_clocks(adev);
}
}
pm_runtime_mark_last_busy(ddev->dev); pm_runtime_mark_last_busy(ddev->dev);
pm_runtime_put_autosuspend(ddev->dev); pm_runtime_put_autosuspend(ddev->dev);
...@@ -1382,10 +1306,7 @@ static ssize_t amdgpu_get_pp_mclk_od(struct device *dev, ...@@ -1382,10 +1306,7 @@ static ssize_t amdgpu_get_pp_mclk_od(struct device *dev,
return ret; return ret;
} }
if (is_support_sw_smu(adev)) value = amdgpu_dpm_get_mclk_od(adev);
value = 0;
else if (adev->powerplay.pp_funcs->get_mclk_od)
value = amdgpu_dpm_get_mclk_od(adev);
pm_runtime_mark_last_busy(ddev->dev); pm_runtime_mark_last_busy(ddev->dev);
pm_runtime_put_autosuspend(ddev->dev); pm_runtime_put_autosuspend(ddev->dev);
...@@ -1419,19 +1340,7 @@ static ssize_t amdgpu_set_pp_mclk_od(struct device *dev, ...@@ -1419,19 +1340,7 @@ static ssize_t amdgpu_set_pp_mclk_od(struct device *dev,
return ret; return ret;
} }
if (is_support_sw_smu(adev)) { amdgpu_dpm_set_mclk_od(adev, (uint32_t)value);
value = 0;
} else {
if (adev->powerplay.pp_funcs->set_mclk_od)
amdgpu_dpm_set_mclk_od(adev, (uint32_t)value);
if (adev->powerplay.pp_funcs->dispatch_tasks) {
amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_READJUST_POWER_STATE, NULL);
} else {
adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps;
amdgpu_pm_compute_clocks(adev);
}
}
pm_runtime_mark_last_busy(ddev->dev); pm_runtime_mark_last_busy(ddev->dev);
pm_runtime_put_autosuspend(ddev->dev); pm_runtime_put_autosuspend(ddev->dev);
...@@ -1479,9 +1388,8 @@ static ssize_t amdgpu_get_pp_power_profile_mode(struct device *dev, ...@@ -1479,9 +1388,8 @@ static ssize_t amdgpu_get_pp_power_profile_mode(struct device *dev,
return ret; return ret;
} }
if (adev->powerplay.pp_funcs->get_power_profile_mode) size = amdgpu_dpm_get_power_profile_mode(adev, buf);
size = amdgpu_dpm_get_power_profile_mode(adev, buf); if (size <= 0)
else
size = sysfs_emit(buf, "\n"); size = sysfs_emit(buf, "\n");
pm_runtime_mark_last_busy(ddev->dev); pm_runtime_mark_last_busy(ddev->dev);
...@@ -1545,8 +1453,7 @@ static ssize_t amdgpu_set_pp_power_profile_mode(struct device *dev, ...@@ -1545,8 +1453,7 @@ static ssize_t amdgpu_set_pp_power_profile_mode(struct device *dev,
return ret; return ret;
} }
if (adev->powerplay.pp_funcs->set_power_profile_mode) ret = amdgpu_dpm_set_power_profile_mode(adev, parameter, parameter_size);
ret = amdgpu_dpm_set_power_profile_mode(adev, parameter, parameter_size);
pm_runtime_mark_last_busy(ddev->dev); pm_runtime_mark_last_busy(ddev->dev);
pm_runtime_put_autosuspend(ddev->dev); pm_runtime_put_autosuspend(ddev->dev);
...@@ -1812,9 +1719,7 @@ static ssize_t amdgpu_get_gpu_metrics(struct device *dev, ...@@ -1812,9 +1719,7 @@ static ssize_t amdgpu_get_gpu_metrics(struct device *dev,
return ret; return ret;
} }
if (adev->powerplay.pp_funcs->get_gpu_metrics) size = amdgpu_dpm_get_gpu_metrics(adev, &gpu_metrics);
size = amdgpu_dpm_get_gpu_metrics(adev, &gpu_metrics);
if (size <= 0) if (size <= 0)
goto out; goto out;
...@@ -2053,7 +1958,6 @@ static int default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_ ...@@ -2053,7 +1958,6 @@ static int default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_
{ {
struct device_attribute *dev_attr = &attr->dev_attr; struct device_attribute *dev_attr = &attr->dev_attr;
const char *attr_name = dev_attr->attr.name; const char *attr_name = dev_attr->attr.name;
struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
enum amd_asic_type asic_type = adev->asic_type; enum amd_asic_type asic_type = adev->asic_type;
if (!(attr->flags & mask)) { if (!(attr->flags & mask)) {
...@@ -2076,9 +1980,7 @@ static int default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_ ...@@ -2076,9 +1980,7 @@ static int default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_
*states = ATTR_STATE_UNSUPPORTED; *states = ATTR_STATE_UNSUPPORTED;
} else if (DEVICE_ATTR_IS(pp_od_clk_voltage)) { } else if (DEVICE_ATTR_IS(pp_od_clk_voltage)) {
*states = ATTR_STATE_UNSUPPORTED; *states = ATTR_STATE_UNSUPPORTED;
if ((is_support_sw_smu(adev) && adev->smu.od_enabled) || if (amdgpu_dpm_is_overdrive_supported(adev))
(is_support_sw_smu(adev) && adev->smu.is_apu) ||
(!is_support_sw_smu(adev) && hwmgr->od_enabled))
*states = ATTR_STATE_SUPPORTED; *states = ATTR_STATE_SUPPORTED;
} else if (DEVICE_ATTR_IS(mem_busy_percent)) { } else if (DEVICE_ATTR_IS(mem_busy_percent)) {
if (adev->flags & AMD_IS_APU || asic_type == CHIP_VEGA10) if (adev->flags & AMD_IS_APU || asic_type == CHIP_VEGA10)
...@@ -2106,8 +2008,7 @@ static int default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_ ...@@ -2106,8 +2008,7 @@ static int default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_
if (!(asic_type == CHIP_VANGOGH || asic_type == CHIP_SIENNA_CICHLID)) if (!(asic_type == CHIP_VANGOGH || asic_type == CHIP_SIENNA_CICHLID))
*states = ATTR_STATE_UNSUPPORTED; *states = ATTR_STATE_UNSUPPORTED;
} else if (DEVICE_ATTR_IS(pp_power_profile_mode)) { } else if (DEVICE_ATTR_IS(pp_power_profile_mode)) {
if (!adev->powerplay.pp_funcs->get_power_profile_mode || if (amdgpu_dpm_get_power_profile_mode(adev, NULL) == -EOPNOTSUPP)
amdgpu_dpm_get_power_profile_mode(adev, NULL) == -EOPNOTSUPP)
*states = ATTR_STATE_UNSUPPORTED; *states = ATTR_STATE_UNSUPPORTED;
} }
...@@ -2396,17 +2297,14 @@ static ssize_t amdgpu_hwmon_get_pwm1_enable(struct device *dev, ...@@ -2396,17 +2297,14 @@ static ssize_t amdgpu_hwmon_get_pwm1_enable(struct device *dev,
return ret; return ret;
} }
if (!adev->powerplay.pp_funcs->get_fan_control_mode) { ret = amdgpu_dpm_get_fan_control_mode(adev, &pwm_mode);
pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
return -EINVAL;
}
pwm_mode = amdgpu_dpm_get_fan_control_mode(adev);
pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
if (ret)
return -EINVAL;
return sysfs_emit(buf, "%u\n", pwm_mode); return sysfs_emit(buf, "%u\n", pwm_mode);
} }
...@@ -2434,17 +2332,14 @@ static ssize_t amdgpu_hwmon_set_pwm1_enable(struct device *dev, ...@@ -2434,17 +2332,14 @@ static ssize_t amdgpu_hwmon_set_pwm1_enable(struct device *dev,
return ret; return ret;
} }
if (!adev->powerplay.pp_funcs->set_fan_control_mode) { ret = amdgpu_dpm_set_fan_control_mode(adev, value);
pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
return -EINVAL;
}
amdgpu_dpm_set_fan_control_mode(adev, value);
pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
if (ret)
return -EINVAL;
return count; return count;
} }
...@@ -2476,32 +2371,29 @@ static ssize_t amdgpu_hwmon_set_pwm1(struct device *dev, ...@@ -2476,32 +2371,29 @@ static ssize_t amdgpu_hwmon_set_pwm1(struct device *dev,
if (adev->in_suspend && !adev->in_runpm) if (adev->in_suspend && !adev->in_runpm)
return -EPERM; return -EPERM;
err = kstrtou32(buf, 10, &value);
if (err)
return err;
err = pm_runtime_get_sync(adev_to_drm(adev)->dev); err = pm_runtime_get_sync(adev_to_drm(adev)->dev);
if (err < 0) { if (err < 0) {
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
return err; return err;
} }
pwm_mode = amdgpu_dpm_get_fan_control_mode(adev); err = amdgpu_dpm_get_fan_control_mode(adev, &pwm_mode);
if (err)
goto out;
if (pwm_mode != AMD_FAN_CTRL_MANUAL) { if (pwm_mode != AMD_FAN_CTRL_MANUAL) {
pr_info("manual fan speed control should be enabled first\n"); pr_info("manual fan speed control should be enabled first\n");
pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); err = -EINVAL;
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); goto out;
return -EINVAL;
} }
err = kstrtou32(buf, 10, &value); err = amdgpu_dpm_set_fan_speed_pwm(adev, value);
if (err) {
pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
return err;
}
if (adev->powerplay.pp_funcs->set_fan_speed_pwm)
err = amdgpu_dpm_set_fan_speed_pwm(adev, value);
else
err = -EINVAL;
out:
pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
...@@ -2530,10 +2422,7 @@ static ssize_t amdgpu_hwmon_get_pwm1(struct device *dev, ...@@ -2530,10 +2422,7 @@ static ssize_t amdgpu_hwmon_get_pwm1(struct device *dev,
return err; return err;
} }
if (adev->powerplay.pp_funcs->get_fan_speed_pwm) err = amdgpu_dpm_get_fan_speed_pwm(adev, &speed);
err = amdgpu_dpm_get_fan_speed_pwm(adev, &speed);
else
err = -EINVAL;
pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
...@@ -2563,10 +2452,7 @@ static ssize_t amdgpu_hwmon_get_fan1_input(struct device *dev, ...@@ -2563,10 +2452,7 @@ static ssize_t amdgpu_hwmon_get_fan1_input(struct device *dev,
return err; return err;
} }
if (adev->powerplay.pp_funcs->get_fan_speed_rpm) err = amdgpu_dpm_get_fan_speed_rpm(adev, &speed);
err = amdgpu_dpm_get_fan_speed_rpm(adev, &speed);
else
err = -EINVAL;
pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
...@@ -2660,10 +2546,7 @@ static ssize_t amdgpu_hwmon_get_fan1_target(struct device *dev, ...@@ -2660,10 +2546,7 @@ static ssize_t amdgpu_hwmon_get_fan1_target(struct device *dev,
return err; return err;
} }
if (adev->powerplay.pp_funcs->get_fan_speed_rpm) err = amdgpu_dpm_get_fan_speed_rpm(adev, &rpm);
err = amdgpu_dpm_get_fan_speed_rpm(adev, &rpm);
else
err = -EINVAL;
pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
...@@ -2688,32 +2571,28 @@ static ssize_t amdgpu_hwmon_set_fan1_target(struct device *dev, ...@@ -2688,32 +2571,28 @@ static ssize_t amdgpu_hwmon_set_fan1_target(struct device *dev,
if (adev->in_suspend && !adev->in_runpm) if (adev->in_suspend && !adev->in_runpm)
return -EPERM; return -EPERM;
err = kstrtou32(buf, 10, &value);
if (err)
return err;
err = pm_runtime_get_sync(adev_to_drm(adev)->dev); err = pm_runtime_get_sync(adev_to_drm(adev)->dev);
if (err < 0) { if (err < 0) {
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
return err; return err;
} }
pwm_mode = amdgpu_dpm_get_fan_control_mode(adev); err = amdgpu_dpm_get_fan_control_mode(adev, &pwm_mode);
if (err)
goto out;
if (pwm_mode != AMD_FAN_CTRL_MANUAL) { if (pwm_mode != AMD_FAN_CTRL_MANUAL) {
pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); err = -ENODATA;
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); goto out;
return -ENODATA;
}
err = kstrtou32(buf, 10, &value);
if (err) {
pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
return err;
} }
if (adev->powerplay.pp_funcs->set_fan_speed_rpm) err = amdgpu_dpm_set_fan_speed_rpm(adev, value);
err = amdgpu_dpm_set_fan_speed_rpm(adev, value);
else
err = -EINVAL;
out:
pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
...@@ -2742,17 +2621,14 @@ static ssize_t amdgpu_hwmon_get_fan1_enable(struct device *dev, ...@@ -2742,17 +2621,14 @@ static ssize_t amdgpu_hwmon_get_fan1_enable(struct device *dev,
return ret; return ret;
} }
if (!adev->powerplay.pp_funcs->get_fan_control_mode) { ret = amdgpu_dpm_get_fan_control_mode(adev, &pwm_mode);
pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
return -EINVAL;
}
pwm_mode = amdgpu_dpm_get_fan_control_mode(adev);
pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
if (ret)
return -EINVAL;
return sysfs_emit(buf, "%i\n", pwm_mode == AMD_FAN_CTRL_AUTO ? 0 : 1); return sysfs_emit(buf, "%i\n", pwm_mode == AMD_FAN_CTRL_AUTO ? 0 : 1);
} }
...@@ -2788,16 +2664,14 @@ static ssize_t amdgpu_hwmon_set_fan1_enable(struct device *dev, ...@@ -2788,16 +2664,14 @@ static ssize_t amdgpu_hwmon_set_fan1_enable(struct device *dev,
return err; return err;
} }
if (!adev->powerplay.pp_funcs->set_fan_control_mode) { err = amdgpu_dpm_set_fan_control_mode(adev, pwm_mode);
pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
return -EINVAL;
}
amdgpu_dpm_set_fan_control_mode(adev, pwm_mode);
pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
if (err)
return -EINVAL;
return count; return count;
} }
...@@ -2933,7 +2807,6 @@ static ssize_t amdgpu_hwmon_show_power_cap_generic(struct device *dev, ...@@ -2933,7 +2807,6 @@ static ssize_t amdgpu_hwmon_show_power_cap_generic(struct device *dev,
enum pp_power_limit_level pp_limit_level) enum pp_power_limit_level pp_limit_level)
{ {
struct amdgpu_device *adev = dev_get_drvdata(dev); struct amdgpu_device *adev = dev_get_drvdata(dev);
const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
enum pp_power_type power_type = to_sensor_dev_attr(attr)->index; enum pp_power_type power_type = to_sensor_dev_attr(attr)->index;
uint32_t limit; uint32_t limit;
ssize_t size; ssize_t size;
...@@ -2944,16 +2817,13 @@ static ssize_t amdgpu_hwmon_show_power_cap_generic(struct device *dev, ...@@ -2944,16 +2817,13 @@ static ssize_t amdgpu_hwmon_show_power_cap_generic(struct device *dev,
if (adev->in_suspend && !adev->in_runpm) if (adev->in_suspend && !adev->in_runpm)
return -EPERM; return -EPERM;
if ( !(pp_funcs && pp_funcs->get_power_limit))
return -ENODATA;
r = pm_runtime_get_sync(adev_to_drm(adev)->dev); r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
if (r < 0) { if (r < 0) {
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
return r; return r;
} }
r = pp_funcs->get_power_limit(adev->powerplay.pp_handle, &limit, r = amdgpu_dpm_get_power_limit(adev, &limit,
pp_limit_level, power_type); pp_limit_level, power_type);
if (!r) if (!r)
...@@ -3008,7 +2878,6 @@ static ssize_t amdgpu_hwmon_set_power_cap(struct device *dev, ...@@ -3008,7 +2878,6 @@ static ssize_t amdgpu_hwmon_set_power_cap(struct device *dev,
size_t count) size_t count)
{ {
struct amdgpu_device *adev = dev_get_drvdata(dev); struct amdgpu_device *adev = dev_get_drvdata(dev);
const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
int limit_type = to_sensor_dev_attr(attr)->index; int limit_type = to_sensor_dev_attr(attr)->index;
int err; int err;
u32 value; u32 value;
...@@ -3034,10 +2903,7 @@ static ssize_t amdgpu_hwmon_set_power_cap(struct device *dev, ...@@ -3034,10 +2903,7 @@ static ssize_t amdgpu_hwmon_set_power_cap(struct device *dev,
return err; return err;
} }
if (pp_funcs && pp_funcs->set_power_limit) err = amdgpu_dpm_set_power_limit(adev, value);
err = pp_funcs->set_power_limit(adev->powerplay.pp_handle, value);
else
err = -EINVAL;
pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
...@@ -3310,6 +3176,7 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj, ...@@ -3310,6 +3176,7 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj,
struct device *dev = kobj_to_dev(kobj); struct device *dev = kobj_to_dev(kobj);
struct amdgpu_device *adev = dev_get_drvdata(dev); struct amdgpu_device *adev = dev_get_drvdata(dev);
umode_t effective_mode = attr->mode; umode_t effective_mode = attr->mode;
uint32_t speed = 0;
/* under multi-vf mode, the hwmon attributes are all not supported */ /* under multi-vf mode, the hwmon attributes are all not supported */
if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
...@@ -3374,20 +3241,18 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj, ...@@ -3374,20 +3241,18 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj,
attr == &sensor_dev_attr_fan1_enable.dev_attr.attr)) attr == &sensor_dev_attr_fan1_enable.dev_attr.attr))
return 0; return 0;
if (!is_support_sw_smu(adev)) { /* mask fan attributes if we have no bindings for this asic to expose */
/* mask fan attributes if we have no bindings for this asic to expose */ if (((amdgpu_dpm_get_fan_speed_pwm(adev, &speed) == -EINVAL) &&
if ((!adev->powerplay.pp_funcs->get_fan_speed_pwm && attr == &sensor_dev_attr_pwm1.dev_attr.attr) || /* can't query fan */
attr == &sensor_dev_attr_pwm1.dev_attr.attr) || /* can't query fan */ ((amdgpu_dpm_get_fan_control_mode(adev, &speed) == -EOPNOTSUPP) &&
(!adev->powerplay.pp_funcs->get_fan_control_mode && attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr)) /* can't query state */
attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr)) /* can't query state */ effective_mode &= ~S_IRUGO;
effective_mode &= ~S_IRUGO;
if ((!adev->powerplay.pp_funcs->set_fan_speed_pwm && if (((amdgpu_dpm_set_fan_speed_pwm(adev, speed) == -EINVAL) &&
attr == &sensor_dev_attr_pwm1.dev_attr.attr) || /* can't manage fan */ attr == &sensor_dev_attr_pwm1.dev_attr.attr) || /* can't manage fan */
(!adev->powerplay.pp_funcs->set_fan_control_mode && ((amdgpu_dpm_set_fan_control_mode(adev, speed) == -EOPNOTSUPP) &&
attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr)) /* can't manage state */ attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr)) /* can't manage state */
effective_mode &= ~S_IWUSR; effective_mode &= ~S_IWUSR;
}
if (((adev->family == AMDGPU_FAMILY_SI) || if (((adev->family == AMDGPU_FAMILY_SI) ||
((adev->flags & AMD_IS_APU) && ((adev->flags & AMD_IS_APU) &&
...@@ -3404,22 +3269,20 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj, ...@@ -3404,22 +3269,20 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj,
(attr == &sensor_dev_attr_power1_average.dev_attr.attr)) (attr == &sensor_dev_attr_power1_average.dev_attr.attr))
return 0; return 0;
if (!is_support_sw_smu(adev)) { /* hide max/min values if we can't both query and manage the fan */
/* hide max/min values if we can't both query and manage the fan */ if (((amdgpu_dpm_set_fan_speed_pwm(adev, speed) == -EINVAL) &&
if ((!adev->powerplay.pp_funcs->set_fan_speed_pwm && (amdgpu_dpm_get_fan_speed_pwm(adev, &speed) == -EINVAL) &&
!adev->powerplay.pp_funcs->get_fan_speed_pwm) && (amdgpu_dpm_set_fan_speed_rpm(adev, speed) == -EINVAL) &&
(!adev->powerplay.pp_funcs->set_fan_speed_rpm && (amdgpu_dpm_get_fan_speed_rpm(adev, &speed) == -EINVAL)) &&
!adev->powerplay.pp_funcs->get_fan_speed_rpm) && (attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
(attr == &sensor_dev_attr_pwm1_max.dev_attr.attr || attr == &sensor_dev_attr_pwm1_min.dev_attr.attr))
attr == &sensor_dev_attr_pwm1_min.dev_attr.attr)) return 0;
return 0;
if ((!adev->powerplay.pp_funcs->set_fan_speed_rpm && if ((amdgpu_dpm_set_fan_speed_rpm(adev, speed) == -EINVAL) &&
!adev->powerplay.pp_funcs->get_fan_speed_rpm) && (amdgpu_dpm_get_fan_speed_rpm(adev, &speed) == -EINVAL) &&
(attr == &sensor_dev_attr_fan1_max.dev_attr.attr || (attr == &sensor_dev_attr_fan1_max.dev_attr.attr ||
attr == &sensor_dev_attr_fan1_min.dev_attr.attr)) attr == &sensor_dev_attr_fan1_min.dev_attr.attr))
return 0; return 0;
}
if ((adev->family == AMDGPU_FAMILY_SI || /* not implemented yet */ if ((adev->family == AMDGPU_FAMILY_SI || /* not implemented yet */
adev->family == AMDGPU_FAMILY_KV) && /* not implemented yet */ adev->family == AMDGPU_FAMILY_KV) && /* not implemented yet */
...@@ -3549,14 +3412,15 @@ static void amdgpu_debugfs_prints_cpu_info(struct seq_file *m, ...@@ -3549,14 +3412,15 @@ static void amdgpu_debugfs_prints_cpu_info(struct seq_file *m,
uint16_t *p_val; uint16_t *p_val;
uint32_t size; uint32_t size;
int i; int i;
uint32_t num_cpu_cores = amdgpu_dpm_get_num_cpu_cores(adev);
if (is_support_cclk_dpm(adev)) { if (amdgpu_dpm_is_cclk_dpm_supported(adev)) {
p_val = kcalloc(adev->smu.cpu_core_num, sizeof(uint16_t), p_val = kcalloc(num_cpu_cores, sizeof(uint16_t),
GFP_KERNEL); GFP_KERNEL);
if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_CPU_CLK, if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_CPU_CLK,
(void *)p_val, &size)) { (void *)p_val, &size)) {
for (i = 0; i < adev->smu.cpu_core_num; i++) for (i = 0; i < num_cpu_cores; i++)
seq_printf(m, "\t%u MHz (CPU%d)\n", seq_printf(m, "\t%u MHz (CPU%d)\n",
*(p_val + i), i); *(p_val + i), i);
} }
...@@ -3684,27 +3548,11 @@ static int amdgpu_debugfs_pm_info_show(struct seq_file *m, void *unused) ...@@ -3684,27 +3548,11 @@ static int amdgpu_debugfs_pm_info_show(struct seq_file *m, void *unused)
return r; return r;
} }
if (!adev->pm.dpm_enabled) { if (amdgpu_dpm_debugfs_print_current_performance_level(adev, m)) {
seq_printf(m, "dpm not enabled\n");
pm_runtime_mark_last_busy(dev->dev);
pm_runtime_put_autosuspend(dev->dev);
return 0;
}
if (!is_support_sw_smu(adev) &&
adev->powerplay.pp_funcs->debugfs_print_current_performance_level) {
mutex_lock(&adev->pm.mutex);
if (adev->powerplay.pp_funcs->debugfs_print_current_performance_level)
adev->powerplay.pp_funcs->debugfs_print_current_performance_level(adev, m);
else
seq_printf(m, "Debugfs support not implemented for this asic\n");
mutex_unlock(&adev->pm.mutex);
r = 0;
} else {
r = amdgpu_debugfs_pm_info_pp(m, adev); r = amdgpu_debugfs_pm_info_pp(m, adev);
if (r)
goto out;
} }
if (r)
goto out;
amdgpu_device_ip_get_clockgating_state(adev, &flags); amdgpu_device_ip_get_clockgating_state(adev, &flags);
...@@ -3730,21 +3578,18 @@ static ssize_t amdgpu_pm_prv_buffer_read(struct file *f, char __user *buf, ...@@ -3730,21 +3578,18 @@ static ssize_t amdgpu_pm_prv_buffer_read(struct file *f, char __user *buf,
size_t size, loff_t *pos) size_t size, loff_t *pos)
{ {
struct amdgpu_device *adev = file_inode(f)->i_private; struct amdgpu_device *adev = file_inode(f)->i_private;
const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
void *pp_handle = adev->powerplay.pp_handle;
size_t smu_prv_buf_size; size_t smu_prv_buf_size;
void *smu_prv_buf; void *smu_prv_buf;
int ret = 0;
if (amdgpu_in_reset(adev)) if (amdgpu_in_reset(adev))
return -EPERM; return -EPERM;
if (adev->in_suspend && !adev->in_runpm) if (adev->in_suspend && !adev->in_runpm)
return -EPERM; return -EPERM;
if (pp_funcs && pp_funcs->get_smu_prv_buf_details) ret = amdgpu_dpm_get_smu_prv_buf_details(adev, &smu_prv_buf, &smu_prv_buf_size);
pp_funcs->get_smu_prv_buf_details(pp_handle, &smu_prv_buf, if (ret)
&smu_prv_buf_size); return ret;
else
return -ENOSYS;
if (!smu_prv_buf || !smu_prv_buf_size) if (!smu_prv_buf || !smu_prv_buf_size)
return -EINVAL; return -EINVAL;
...@@ -3777,6 +3622,6 @@ void amdgpu_debugfs_pm_init(struct amdgpu_device *adev) ...@@ -3777,6 +3622,6 @@ void amdgpu_debugfs_pm_init(struct amdgpu_device *adev)
&amdgpu_debugfs_pm_prv_buffer_fops, &amdgpu_debugfs_pm_prv_buffer_fops,
adev->pm.smu_prv_buffer_size); adev->pm.smu_prv_buffer_size);
amdgpu_smu_stb_debug_fs_init(adev); amdgpu_dpm_stb_debug_fs_init(adev);
#endif #endif
} }
...@@ -262,9 +262,6 @@ enum amdgpu_pcie_gen { ...@@ -262,9 +262,6 @@ enum amdgpu_pcie_gen {
#define amdgpu_dpm_pre_set_power_state(adev) \ #define amdgpu_dpm_pre_set_power_state(adev) \
((adev)->powerplay.pp_funcs->pre_set_power_state((adev)->powerplay.pp_handle)) ((adev)->powerplay.pp_funcs->pre_set_power_state((adev)->powerplay.pp_handle))
#define amdgpu_dpm_set_power_state(adev) \
((adev)->powerplay.pp_funcs->set_power_state((adev)->powerplay.pp_handle))
#define amdgpu_dpm_post_set_power_state(adev) \ #define amdgpu_dpm_post_set_power_state(adev) \
((adev)->powerplay.pp_funcs->post_set_power_state((adev)->powerplay.pp_handle)) ((adev)->powerplay.pp_funcs->post_set_power_state((adev)->powerplay.pp_handle))
...@@ -280,100 +277,13 @@ enum amdgpu_pcie_gen { ...@@ -280,100 +277,13 @@ enum amdgpu_pcie_gen {
#define amdgpu_dpm_enable_bapm(adev, e) \ #define amdgpu_dpm_enable_bapm(adev, e) \
((adev)->powerplay.pp_funcs->enable_bapm((adev)->powerplay.pp_handle, (e))) ((adev)->powerplay.pp_funcs->enable_bapm((adev)->powerplay.pp_handle, (e)))
#define amdgpu_dpm_set_fan_control_mode(adev, m) \
((adev)->powerplay.pp_funcs->set_fan_control_mode((adev)->powerplay.pp_handle, (m)))
#define amdgpu_dpm_get_fan_control_mode(adev) \
((adev)->powerplay.pp_funcs->get_fan_control_mode((adev)->powerplay.pp_handle))
#define amdgpu_dpm_set_fan_speed_pwm(adev, s) \
((adev)->powerplay.pp_funcs->set_fan_speed_pwm((adev)->powerplay.pp_handle, (s)))
#define amdgpu_dpm_get_fan_speed_pwm(adev, s) \
((adev)->powerplay.pp_funcs->get_fan_speed_pwm((adev)->powerplay.pp_handle, (s)))
#define amdgpu_dpm_get_fan_speed_rpm(adev, s) \
((adev)->powerplay.pp_funcs->get_fan_speed_rpm)((adev)->powerplay.pp_handle, (s))
#define amdgpu_dpm_set_fan_speed_rpm(adev, s) \
((adev)->powerplay.pp_funcs->set_fan_speed_rpm)((adev)->powerplay.pp_handle, (s))
#define amdgpu_dpm_force_performance_level(adev, l) \
((adev)->powerplay.pp_funcs->force_performance_level((adev)->powerplay.pp_handle, (l)))
#define amdgpu_dpm_get_current_power_state(adev) \
((adev)->powerplay.pp_funcs->get_current_power_state((adev)->powerplay.pp_handle))
#define amdgpu_dpm_get_pp_num_states(adev, data) \
((adev)->powerplay.pp_funcs->get_pp_num_states((adev)->powerplay.pp_handle, data))
#define amdgpu_dpm_get_pp_table(adev, table) \
((adev)->powerplay.pp_funcs->get_pp_table((adev)->powerplay.pp_handle, table))
#define amdgpu_dpm_set_pp_table(adev, buf, size) \
((adev)->powerplay.pp_funcs->set_pp_table((adev)->powerplay.pp_handle, buf, size))
#define amdgpu_dpm_print_clock_levels(adev, type, buf) \
((adev)->powerplay.pp_funcs->print_clock_levels((adev)->powerplay.pp_handle, type, buf))
#define amdgpu_dpm_force_clock_level(adev, type, level) \
((adev)->powerplay.pp_funcs->force_clock_level((adev)->powerplay.pp_handle, type, level))
#define amdgpu_dpm_get_sclk_od(adev) \
((adev)->powerplay.pp_funcs->get_sclk_od((adev)->powerplay.pp_handle))
#define amdgpu_dpm_set_sclk_od(adev, value) \
((adev)->powerplay.pp_funcs->set_sclk_od((adev)->powerplay.pp_handle, value))
#define amdgpu_dpm_get_mclk_od(adev) \
((adev)->powerplay.pp_funcs->get_mclk_od((adev)->powerplay.pp_handle))
#define amdgpu_dpm_set_mclk_od(adev, value) \
((adev)->powerplay.pp_funcs->set_mclk_od((adev)->powerplay.pp_handle, value))
#define amdgpu_dpm_dispatch_task(adev, task_id, user_state) \
((adev)->powerplay.pp_funcs->dispatch_tasks)((adev)->powerplay.pp_handle, (task_id), (user_state))
#define amdgpu_dpm_check_state_equal(adev, cps, rps, equal) \ #define amdgpu_dpm_check_state_equal(adev, cps, rps, equal) \
((adev)->powerplay.pp_funcs->check_state_equal((adev)->powerplay.pp_handle, (cps), (rps), (equal))) ((adev)->powerplay.pp_funcs->check_state_equal((adev)->powerplay.pp_handle, (cps), (rps), (equal)))
#define amdgpu_dpm_get_vce_clock_state(adev, i) \
((adev)->powerplay.pp_funcs->get_vce_clock_state((adev)->powerplay.pp_handle, (i)))
#define amdgpu_dpm_get_performance_level(adev) \
((adev)->powerplay.pp_funcs->get_performance_level((adev)->powerplay.pp_handle))
#define amdgpu_dpm_reset_power_profile_state(adev, request) \ #define amdgpu_dpm_reset_power_profile_state(adev, request) \
((adev)->powerplay.pp_funcs->reset_power_profile_state(\ ((adev)->powerplay.pp_funcs->reset_power_profile_state(\
(adev)->powerplay.pp_handle, request)) (adev)->powerplay.pp_handle, request))
#define amdgpu_dpm_get_power_profile_mode(adev, buf) \
((adev)->powerplay.pp_funcs->get_power_profile_mode(\
(adev)->powerplay.pp_handle, buf))
#define amdgpu_dpm_set_power_profile_mode(adev, parameter, size) \
((adev)->powerplay.pp_funcs->set_power_profile_mode(\
(adev)->powerplay.pp_handle, parameter, size))
#define amdgpu_dpm_set_fine_grain_clk_vol(adev, type, parameter, size) \
((adev)->powerplay.pp_funcs->set_fine_grain_clk_vol(\
(adev)->powerplay.pp_handle, type, parameter, size))
#define amdgpu_dpm_odn_edit_dpm_table(adev, type, parameter, size) \
((adev)->powerplay.pp_funcs->odn_edit_dpm_table(\
(adev)->powerplay.pp_handle, type, parameter, size))
#define amdgpu_dpm_get_ppfeature_status(adev, buf) \
((adev)->powerplay.pp_funcs->get_ppfeature_status(\
(adev)->powerplay.pp_handle, (buf)))
#define amdgpu_dpm_set_ppfeature_status(adev, ppfeatures) \
((adev)->powerplay.pp_funcs->set_ppfeature_status(\
(adev)->powerplay.pp_handle, (ppfeatures)))
#define amdgpu_dpm_get_gpu_metrics(adev, table) \
((adev)->powerplay.pp_funcs->get_gpu_metrics((adev)->powerplay.pp_handle, table))
struct amdgpu_dpm { struct amdgpu_dpm {
struct amdgpu_ps *ps; struct amdgpu_ps *ps;
/* number of valid power states */ /* number of valid power states */
...@@ -606,4 +516,74 @@ void amdgpu_dpm_gfx_state_change(struct amdgpu_device *adev, ...@@ -606,4 +516,74 @@ void amdgpu_dpm_gfx_state_change(struct amdgpu_device *adev,
enum gfx_change_state state); enum gfx_change_state state);
int amdgpu_dpm_get_ecc_info(struct amdgpu_device *adev, int amdgpu_dpm_get_ecc_info(struct amdgpu_device *adev,
void *umc_ecc); void *umc_ecc);
struct amd_vce_state *amdgpu_dpm_get_vce_clock_state(struct amdgpu_device *adev,
uint32_t idx);
void amdgpu_dpm_get_current_power_state(struct amdgpu_device *adev, enum amd_pm_state_type *state);
void amdgpu_dpm_set_power_state(struct amdgpu_device *adev,
enum amd_pm_state_type state);
enum amd_dpm_forced_level amdgpu_dpm_get_performance_level(struct amdgpu_device *adev);
int amdgpu_dpm_force_performance_level(struct amdgpu_device *adev,
enum amd_dpm_forced_level level);
int amdgpu_dpm_get_pp_num_states(struct amdgpu_device *adev,
struct pp_states_info *states);
int amdgpu_dpm_dispatch_task(struct amdgpu_device *adev,
enum amd_pp_task task_id,
enum amd_pm_state_type *user_state);
int amdgpu_dpm_get_pp_table(struct amdgpu_device *adev, char **table);
int amdgpu_dpm_set_fine_grain_clk_vol(struct amdgpu_device *adev,
uint32_t type,
long *input,
uint32_t size);
int amdgpu_dpm_odn_edit_dpm_table(struct amdgpu_device *adev,
uint32_t type,
long *input,
uint32_t size);
int amdgpu_dpm_print_clock_levels(struct amdgpu_device *adev,
enum pp_clock_type type,
char *buf);
int amdgpu_dpm_set_ppfeature_status(struct amdgpu_device *adev,
uint64_t ppfeature_masks);
int amdgpu_dpm_get_ppfeature_status(struct amdgpu_device *adev, char *buf);
int amdgpu_dpm_force_clock_level(struct amdgpu_device *adev,
enum pp_clock_type type,
uint32_t mask);
int amdgpu_dpm_get_sclk_od(struct amdgpu_device *adev);
int amdgpu_dpm_set_sclk_od(struct amdgpu_device *adev, uint32_t value);
int amdgpu_dpm_get_mclk_od(struct amdgpu_device *adev);
int amdgpu_dpm_set_mclk_od(struct amdgpu_device *adev, uint32_t value);
int amdgpu_dpm_get_power_profile_mode(struct amdgpu_device *adev,
char *buf);
int amdgpu_dpm_set_power_profile_mode(struct amdgpu_device *adev,
long *input, uint32_t size);
int amdgpu_dpm_get_gpu_metrics(struct amdgpu_device *adev, void **table);
int amdgpu_dpm_get_fan_control_mode(struct amdgpu_device *adev,
uint32_t *fan_mode);
int amdgpu_dpm_set_fan_speed_pwm(struct amdgpu_device *adev,
uint32_t speed);
int amdgpu_dpm_get_fan_speed_pwm(struct amdgpu_device *adev,
uint32_t *speed);
int amdgpu_dpm_get_fan_speed_rpm(struct amdgpu_device *adev,
uint32_t *speed);
int amdgpu_dpm_set_fan_speed_rpm(struct amdgpu_device *adev,
uint32_t speed);
int amdgpu_dpm_set_fan_control_mode(struct amdgpu_device *adev,
uint32_t mode);
int amdgpu_dpm_get_power_limit(struct amdgpu_device *adev,
uint32_t *limit,
enum pp_power_limit_level pp_limit_level,
enum pp_power_type power_type);
int amdgpu_dpm_set_power_limit(struct amdgpu_device *adev,
uint32_t limit);
int amdgpu_dpm_is_cclk_dpm_supported(struct amdgpu_device *adev);
int amdgpu_dpm_debugfs_print_current_performance_level(struct amdgpu_device *adev,
struct seq_file *m);
int amdgpu_dpm_get_smu_prv_buf_details(struct amdgpu_device *adev,
void **addr,
size_t *size);
int amdgpu_dpm_is_overdrive_supported(struct amdgpu_device *adev);
int amdgpu_dpm_set_pp_table(struct amdgpu_device *adev,
const char *buf,
size_t size);
int amdgpu_dpm_get_num_cpu_cores(struct amdgpu_device *adev);
void amdgpu_dpm_stb_debug_fs_init(struct amdgpu_device *adev);
#endif #endif
...@@ -470,9 +470,6 @@ bool is_support_cclk_dpm(struct amdgpu_device *adev) ...@@ -470,9 +470,6 @@ bool is_support_cclk_dpm(struct amdgpu_device *adev)
{ {
struct smu_context *smu = &adev->smu; struct smu_context *smu = &adev->smu;
if (!is_support_sw_smu(adev))
return false;
if (!smu_feature_is_enabled(smu, SMU_FEATURE_CCLK_DPM_BIT)) if (!smu_feature_is_enabled(smu, SMU_FEATURE_CCLK_DPM_BIT))
return false; return false;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment