Commit 4d154b1c authored by Lijo Lazar's avatar Lijo Lazar Committed by Alex Deucher

drm/amd/pm: Add support for DPM policies

Add support to set/get information about different DPM policies. The
support is only available on SOCs which use swsmu architecture.

A DPM policy type may be defined with different levels. For example, a
policy type may be defined for SOC Pstate preference, with each level
corresponding to a specific Pstate preference; the desired level can then
be selected at runtime.
Signed-off-by: Lijo Lazar <lijo.lazar@amd.com>
Acked-by: Alex Deucher <alexander.deucher@amd.com>
Reviewed-by: Asad Kamal <asad.kamal@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent f6bce954
......@@ -273,6 +273,22 @@ enum pp_xgmi_plpd_mode {
XGMI_PLPD_COUNT,
};
/*
 * DPM policy types exposed through sysfs. PP_PM_POLICY_NONE is a sentinel
 * meaning "no specific policy"; it is used to probe whether any policy
 * support exists at all (see amdgpu_pm_sysfs_init / smu_get_pm_policy_info).
 */
enum pp_pm_policy {
	PP_PM_POLICY_NONE = -1,
	PP_PM_POLICY_SOC_PSTATE = 0,
	PP_PM_POLICY_NUM,
};

/*
 * Selectable levels for the SOC Pstate policy type.
 * NOTE(review): SOC_PSTAT_COUNT looks like a typo for SOC_PSTATE_COUNT,
 * but renaming the constant would break any users outside this file —
 * confirm no external references before fixing.
 */
enum pp_policy_soc_pstate {
	SOC_PSTATE_DEFAULT = 0,
	SOC_PSTATE_0,
	SOC_PSTATE_1,
	SOC_PSTATE_2,
	SOC_PSTAT_COUNT,
};

/* Upper bound on levels any single policy may define (sizes level_mask use) */
#define PP_POLICY_MAX_LEVELS 5
#define PP_GROUP_MASK 0xF0000000
#define PP_GROUP_SHIFT 28
......
......@@ -411,6 +411,36 @@ int amdgpu_dpm_set_xgmi_plpd_mode(struct amdgpu_device *adev, int mode)
return ret;
}
/*
 * amdgpu_dpm_get_pm_policy_info - emit level info for policy @p_type into @buf
 *
 * Thin, pm.mutex-protected wrapper over smu_get_pm_policy_info(). Returns
 * the number of bytes written, or -EOPNOTSUPP when the device does not use
 * the swsmu architecture (the only path supporting DPM policies).
 */
ssize_t amdgpu_dpm_get_pm_policy_info(struct amdgpu_device *adev,
				      enum pp_pm_policy p_type, char *buf)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret;

	if (!is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = smu_get_pm_policy_info(smu, p_type, buf);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}
/*
 * amdgpu_dpm_set_pm_policy - select @policy_level for policy @policy_type
 *
 * Thin, pm.mutex-protected wrapper over smu_set_pm_policy(). Returns 0 on
 * success, -EOPNOTSUPP when the device does not use swsmu, or a negative
 * error from the SMU layer.
 */
int amdgpu_dpm_set_pm_policy(struct amdgpu_device *adev, int policy_type,
			     int policy_level)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret;

	if (!is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = smu_set_pm_policy(smu, policy_type, policy_level);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}
int amdgpu_dpm_enable_mgpu_fan_boost(struct amdgpu_device *adev)
{
void *pp_handle = adev->powerplay.pp_handle;
......
......@@ -2278,6 +2278,131 @@ static ssize_t amdgpu_set_xgmi_plpd_policy(struct device *dev,
return count;
}
/* pm policy attributes */
/* Binds one sysfs attribute to the DPM policy type it controls. */
struct amdgpu_pm_policy_attr {
	struct device_attribute dev_attr;	/* embedded sysfs attribute */
	enum pp_pm_policy id;			/* policy type this attribute maps to */
};
/*
 * amdgpu_get_pm_policy_attr - sysfs show handler for a pm_policy attribute
 *
 * Rejects access during GPU reset or non-runtime suspend, then delegates
 * to amdgpu_dpm_get_pm_policy_info() for the policy bound to @attr.
 */
static ssize_t amdgpu_get_pm_policy_attr(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	struct amdgpu_pm_policy_attr *pattr =
		container_of(attr, struct amdgpu_pm_policy_attr, dev_attr);

	if (amdgpu_in_reset(adev))
		return -EPERM;

	if (adev->in_suspend && !adev->in_runpm)
		return -EPERM;

	return amdgpu_dpm_get_pm_policy_info(adev, pattr->id, buf);
}
/*
 * amdgpu_set_pm_policy_attr - sysfs store handler for a pm_policy attribute
 * @dev: device owning the attribute
 * @attr: attribute embedded in struct amdgpu_pm_policy_attr
 * @buf: user buffer; must contain exactly one integer level token
 * @count: number of bytes in @buf
 *
 * Parses exactly one whitespace-delimited integer and applies it as the
 * level for the policy bound to @attr. Returns @count on success, -EPERM
 * during reset/suspend, -EINVAL on malformed input, or a negative error
 * from the DPM layer.
 */
static ssize_t amdgpu_set_pm_policy_attr(struct device *dev,
					 struct device_attribute *attr,
					 const char *buf, size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	struct amdgpu_pm_policy_attr *policy_attr;
	int ret, num_params = 0;
	char delimiter[] = " \n\t";
	char tmp_buf[128];
	char *tmp, *param;
	long val;

	if (amdgpu_in_reset(adev))
		return -EPERM;

	if (adev->in_suspend && !adev->in_runpm)
		return -EPERM;

	/* An empty write would make the old tmp_buf[count - 1] index
	 * underflow; reject it explicitly. */
	if (!count)
		return -EINVAL;

	/* Cap the copy at sizeof(tmp_buf) - 1 and terminate *after* the
	 * copied data. The previous code capped at sizeof(tmp_buf) and wrote
	 * tmp_buf[count - 1] = '\0', which clobbered the final input byte:
	 * a write without a trailing newline (e.g. `echo -n 2`) lost its
	 * last character and single-character input parsed as empty. */
	count = min(count, sizeof(tmp_buf) - 1);
	memcpy(tmp_buf, buf, count);
	tmp_buf[count] = '\0';

	tmp = skip_spaces(tmp_buf);

	/* Accept exactly one numeric token. Runs of delimiters make strsep()
	 * yield empty tokens, which we simply skip; the old inner
	 * skip_spaces(tmp) was redundant (spaces are delimiters already) and
	 * dereferenced NULL once strsep() exhausted a string ending in a
	 * delimiter (e.g. "2 "). */
	while ((param = strsep(&tmp, delimiter))) {
		if (!strlen(param))
			continue;

		ret = kstrtol(param, 0, &val);
		if (ret)
			return -EINVAL;

		num_params++;
		if (num_params > 1)
			return -EINVAL;
	}

	if (num_params != 1)
		return -EINVAL;

	policy_attr =
		container_of(attr, struct amdgpu_pm_policy_attr, dev_attr);

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return ret;
	}

	ret = amdgpu_dpm_set_pm_policy(adev, policy_attr->id, val);

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	if (ret)
		return ret;

	return count;
}
/* Generate a 0644 sysfs attribute named _name, bound to PP_PM_POLICY_##_id,
 * wired to the common get/set handlers above. */
#define AMDGPU_PM_POLICY_ATTR(_name, _id)                                  \
	static struct amdgpu_pm_policy_attr pm_policy_attr_##_name = {     \
		.dev_attr = __ATTR(_name, 0644, amdgpu_get_pm_policy_attr, \
				   amdgpu_set_pm_policy_attr),             \
		.id = PP_PM_POLICY_##_id,                                  \
	};

/* Shorthand for the raw struct attribute embedded in a generated entry */
#define AMDGPU_PM_POLICY_ATTR_VAR(_name) pm_policy_attr_##_name.dev_attr.attr

AMDGPU_PM_POLICY_ATTR(soc_pstate, SOC_PSTATE)

/* NULL-terminated attribute list for the "pm_policy" sysfs group */
static struct attribute *pm_policy_attrs[] = {
	&AMDGPU_PM_POLICY_ATTR_VAR(soc_pstate),
	NULL
};
static umode_t amdgpu_pm_policy_attr_visible(struct kobject *kobj,
struct attribute *attr, int n)
{
struct device *dev = kobj_to_dev(kobj);
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = drm_to_adev(ddev);
struct amdgpu_pm_policy_attr *policy_attr;
policy_attr =
container_of(attr, struct amdgpu_pm_policy_attr, dev_attr.attr);
if (amdgpu_dpm_get_pm_policy_info(adev, policy_attr->id, NULL) ==
-ENOENT)
return 0;
return attr->mode;
}
/* "pm_policy" sysfs group; per-attribute visibility is decided at
 * registration time by amdgpu_pm_policy_attr_visible(). */
const struct attribute_group amdgpu_pm_policy_attr_group = {
	.name = "pm_policy",
	.attrs = pm_policy_attrs,
	.is_visible = amdgpu_pm_policy_attr_visible,
};
static struct amdgpu_device_attr amdgpu_device_attrs[] = {
AMDGPU_DEVICE_ATTR_RW(power_dpm_state, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
AMDGPU_DEVICE_ATTR_RW(power_dpm_force_performance_level, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
......@@ -4419,6 +4544,14 @@ int amdgpu_pm_sysfs_init(struct amdgpu_device *adev)
dev_info(adev->dev, "overdrive feature is not supported\n");
}
if (amdgpu_dpm_get_pm_policy_info(adev, PP_PM_POLICY_NONE, NULL) !=
-EOPNOTSUPP) {
ret = devm_device_add_group(adev->dev,
&amdgpu_pm_policy_attr_group);
if (ret)
goto err_out0;
}
adev->pm.sysfs_initialized = true;
return 0;
......
......@@ -598,4 +598,9 @@ enum pp_smu_status amdgpu_dpm_get_uclk_dpm_states(struct amdgpu_device *adev,
unsigned int *num_states);
int amdgpu_dpm_get_dpm_clock_table(struct amdgpu_device *adev,
struct dpm_clocks *clock_table);
/* Select @policy_level for @policy_type; 0 on success, -EOPNOTSUPP on
 * non-swsmu devices, or a negative SMU error. */
int amdgpu_dpm_set_pm_policy(struct amdgpu_device *adev, int policy_type,
			     int policy_level);
/* Emit level info for @p_type into @buf (bytes written), or negative errno.
 * A NULL @buf probes for the policy's existence. */
ssize_t amdgpu_dpm_get_pm_policy_info(struct amdgpu_device *adev,
				      enum pp_pm_policy p_type, char *buf);
#endif
......@@ -3498,6 +3498,105 @@ static int smu_get_prv_buffer_details(void *handle, void **addr, size_t *size)
return 0;
}
/*
 * smu_print_dpm_policy - append one "<level> : <desc>" line per valid level
 *
 * Iterates the set bits of @policy->level_mask, appending to @sysbuf at
 * offset *@size and advancing *@size. The currently active level is
 * suffixed with '*'.
 */
static void smu_print_dpm_policy(struct smu_dpm_policy *policy, char *sysbuf,
				 size_t *size)
{
	size_t offset = *size;
	int level;

	for_each_set_bit(level, &policy->level_mask, PP_POLICY_MAX_LEVELS) {
		const char *marker =
			(level == policy->current_level) ? "*" : "";

		offset += sysfs_emit_at(sysbuf, offset, "%d : %s%s\n", level,
					policy->desc->get_desc(policy, level),
					marker);
	}

	*size = offset;
}
/*
 * smu_get_pm_policy_info - emit level information for policy @p_type
 *
 * Error ordering is part of the contract and relied upon by callers:
 *  -EOPNOTSUPP  PM disabled or no policies registered at all (used by
 *               amdgpu_pm_sysfs_init to decide whether to add the group);
 *  -EINVAL      @p_type is the PP_PM_POLICY_NONE sentinel;
 *  -ENOENT      this particular policy is not registered (used by the
 *               sysfs .is_visible callback, which passes a NULL @sysbuf);
 *  -EINVAL      @sysbuf is NULL but the policy exists (pure probe call).
 * Otherwise returns the number of bytes written to @sysbuf.
 */
ssize_t smu_get_pm_policy_info(struct smu_context *smu,
			       enum pp_pm_policy p_type, char *sysbuf)
{
	struct smu_dpm_context *dpm_ctxt = &smu->smu_dpm;
	struct smu_dpm_policy_ctxt *policy_ctxt;
	struct smu_dpm_policy *dpm_policy;
	size_t offset = 0;

	policy_ctxt = dpm_ctxt->dpm_policies;
	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled || !policy_ctxt ||
	    !policy_ctxt->policy_mask)
		return -EOPNOTSUPP;

	if (p_type == PP_PM_POLICY_NONE)
		return -EINVAL;

	dpm_policy = smu_get_pm_policy(smu, p_type);
	/* Policy lookup happens before the NULL-buffer check so that a
	 * probe (sysbuf == NULL) can distinguish "missing" from "present". */
	if (!dpm_policy || !dpm_policy->level_mask || !dpm_policy->desc)
		return -ENOENT;

	if (!sysbuf)
		return -EINVAL;

	smu_print_dpm_policy(dpm_policy, sysbuf, &offset);

	return offset;
}
struct smu_dpm_policy *smu_get_pm_policy(struct smu_context *smu,
enum pp_pm_policy p_type)
{
struct smu_dpm_context *dpm_ctxt = &smu->smu_dpm;
struct smu_dpm_policy_ctxt *policy_ctxt;
int i;
policy_ctxt = dpm_ctxt->dpm_policies;
if (!policy_ctxt)
return NULL;
for (i = 0; i < hweight32(policy_ctxt->policy_mask); ++i) {
if (policy_ctxt->policies[i].policy_type == p_type)
return &policy_ctxt->policies[i];
}
return NULL;
}
/*
 * smu_set_pm_policy - apply @level for policy @p_type
 *
 * Validates PM state, level range and policy registration, then invokes
 * the policy's set_policy hook, caching the level on success. Returns 0
 * on success (including the no-op case where @level is already current),
 * -EOPNOTSUPP when unsupported/unregistered, -EINVAL for an out-of-range
 * level, or the hook's error code.
 */
int smu_set_pm_policy(struct smu_context *smu, enum pp_pm_policy p_type,
		      int level)
{
	struct smu_dpm_policy_ctxt *policy_ctxt = smu->smu_dpm.dpm_policies;
	struct smu_dpm_policy *policy;
	int ret;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled || !policy_ctxt ||
	    !policy_ctxt->policy_mask)
		return -EOPNOTSUPP;

	if (level < 0 || level >= PP_POLICY_MAX_LEVELS)
		return -EINVAL;

	policy = smu_get_pm_policy(smu, p_type);
	if (!policy || !policy->level_mask || !policy->set_policy)
		return -EOPNOTSUPP;

	/* Already at the requested level — nothing to do. */
	if (policy->current_level == level)
		return 0;

	ret = policy->set_policy(smu, level);
	if (!ret)
		policy->current_level = level;

	return ret;
}
int smu_set_xgmi_plpd_mode(struct smu_context *smu,
enum pp_xgmi_plpd_mode mode)
{
......
......@@ -362,6 +362,27 @@ struct smu_table_context {
void *gpu_metrics_table;
};
struct smu_context;
struct smu_dpm_policy;

/* Presentation callbacks for one DPM policy type. */
struct smu_dpm_policy_desc {
	const char *name;	/* sysfs attribute name for this policy */
	/* Return a human-readable label for @level of @dpm_policy */
	char *(*get_desc)(struct smu_dpm_policy *dpm_policy, int level);
};

/* One selectable DPM policy registered by an SMU implementation. */
struct smu_dpm_policy {
	struct smu_dpm_policy_desc *desc;
	enum pp_pm_policy policy_type;
	/* Bitmask of valid levels: bit i set means level i is selectable
	 * (bounded by PP_POLICY_MAX_LEVELS). */
	unsigned long level_mask;
	int current_level;	/* level applied last, cached by smu_set_pm_policy() */
	/* Hardware-specific hook; returns 0 on success or negative errno */
	int (*set_policy)(struct smu_context *ctxt, int level);
};

/* All policies of an SMU instance; policy_mask has one bit per
 * registered policy type. */
struct smu_dpm_policy_ctxt {
	struct smu_dpm_policy policies[PP_PM_POLICY_NUM];
	unsigned long policy_mask;
};
struct smu_dpm_context {
uint32_t dpm_context_size;
void *dpm_context;
......@@ -372,6 +393,7 @@ struct smu_dpm_context {
struct smu_power_state *dpm_request_power_state;
struct smu_power_state *dpm_current_power_state;
struct mclock_latency_table *mclk_latency_table;
struct smu_dpm_policy_ctxt *dpm_policies;
};
struct smu_power_gate {
......@@ -1551,6 +1573,11 @@ typedef struct {
uint32_t MmHubPadding[8];
} WifiBandEntryTable_t;
/* sysfs name used for the SOC Pstate policy attribute */
#define STR_SOC_PSTATE_POLICY "soc_pstate"

/* Look up the registered policy entry for @p_type; NULL if absent. */
struct smu_dpm_policy *smu_get_pm_policy(struct smu_context *smu,
					 enum pp_pm_policy p_type);
#if !defined(SWSMU_CODE_LAYER_L2) && !defined(SWSMU_CODE_LAYER_L3) && !defined(SWSMU_CODE_LAYER_L4)
int smu_get_power_limit(void *handle,
uint32_t *limit,
......@@ -1598,5 +1625,10 @@ void amdgpu_smu_stb_debug_fs_init(struct amdgpu_device *adev);
int smu_send_hbm_bad_pages_num(struct smu_context *smu, uint32_t size);
int smu_send_hbm_bad_channel_flag(struct smu_context *smu, uint32_t size);
int smu_send_rma_reason(struct smu_context *smu);
/* Apply @level for policy @p_type; 0 on success or negative errno. */
int smu_set_pm_policy(struct smu_context *smu, enum pp_pm_policy p_type,
		      int level);
/* Emit level info for @p_type into @sysbuf (bytes written) or negative
 * errno; NULL @sysbuf probes for the policy's existence. */
ssize_t smu_get_pm_policy_info(struct smu_context *smu,
			       enum pp_pm_policy p_type, char *sysbuf);
#endif
#endif
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment