Commit 66c86828 authored by Evan Quan, committed by Alex Deucher

drm/amd/powerplay: move SMC message issuing APIs to smu_cmn.c

These APIs can be shared by all ASICs.
Signed-off-by: Evan Quan <evan.quan@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent c1b353b7
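
For orientation before the diff: this patch replaces the per-IP helpers (smu_v11_0_send_msg_with_param(), smu_v12_0_send_msg_with_param()) with shared smu_cmn_send_smc_msg() / smu_cmn_send_smc_msg_with_param() helpers in smu_cmn.c, and the ASIC ppt backends now call those directly. A minimal sketch of the resulting calling convention, mirroring the navi10_run_btc() hunk below (the wrapper function name is illustrative, not part of the patch):

/* Sketch only: issuing an SMC message through the common helpers added
 * by this patch. The wrapper name is made up; the helper and message ID
 * are the ones that appear in the diff below.
 */
static int example_run_btc(struct smu_context *smu)
{
        int ret;

        /* No argument payload; the response value is not needed (read_arg == NULL). */
        ret = smu_cmn_send_smc_msg(smu, SMU_MSG_RunBtc, NULL);
        if (ret)
                dev_err(smu->adev->dev, "RunBtc failed!\n");

        return ret;
}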
@@ -46,19 +46,6 @@
#undef pr_info
#undef pr_debug
-#undef __SMU_DUMMY_MAP
-#define __SMU_DUMMY_MAP(type) #type
-static const char* __smu_message_names[] = {
-SMU_MESSAGE_TYPES
-};
-const char *smu_get_message_name(struct smu_context *smu, enum smu_message_type type)
-{
-if (type < 0 || type >= SMU_MSG_MAX_COUNT)
-return "unknown smu message";
-return __smu_message_names[type];
-}
size_t smu_sys_get_pp_feature_mask(struct smu_context *smu, char *buf)
{
size_t size = 0;
...
@@ -455,13 +455,13 @@ static int arcturus_run_btc(struct smu_context *smu)
{
int ret = 0;
-ret = smu_send_smc_msg(smu, SMU_MSG_RunAfllBtc, NULL);
+ret = smu_cmn_send_smc_msg(smu, SMU_MSG_RunAfllBtc, NULL);
if (ret) {
dev_err(smu->adev->dev, "RunAfllBtc failed!\n");
return ret;
}
-return smu_send_smc_msg(smu, SMU_MSG_RunDcBtc, NULL);
+return smu_cmn_send_smc_msg(smu, SMU_MSG_RunDcBtc, NULL);
}
static int arcturus_populate_umd_state_clk(struct smu_context *smu)
@@ -839,7 +839,7 @@ static int arcturus_upload_dpm_level(struct smu_context *smu,
if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_GFXCLK_BIT) &&
(feature_mask & FEATURE_DPM_GFXCLK_MASK)) {
freq = dpm_context->dpm_tables.gfx_table.dpm_levels[level].value;
-ret = smu_send_smc_msg_with_param(smu,
+ret = smu_cmn_send_smc_msg_with_param(smu,
(max ? SMU_MSG_SetSoftMaxByFreq : SMU_MSG_SetSoftMinByFreq),
(PPCLK_GFXCLK << 16) | (freq & 0xffff),
NULL);
@@ -853,7 +853,7 @@ static int arcturus_upload_dpm_level(struct smu_context *smu,
if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT) &&
(feature_mask & FEATURE_DPM_UCLK_MASK)) {
freq = dpm_context->dpm_tables.uclk_table.dpm_levels[level].value;
-ret = smu_send_smc_msg_with_param(smu,
+ret = smu_cmn_send_smc_msg_with_param(smu,
(max ? SMU_MSG_SetSoftMaxByFreq : SMU_MSG_SetSoftMinByFreq),
(PPCLK_UCLK << 16) | (freq & 0xffff),
NULL);
@@ -867,7 +867,7 @@ static int arcturus_upload_dpm_level(struct smu_context *smu,
if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_SOCCLK_BIT) &&
(feature_mask & FEATURE_DPM_SOCCLK_MASK)) {
freq = dpm_context->dpm_tables.soc_table.dpm_levels[level].value;
-ret = smu_send_smc_msg_with_param(smu,
+ret = smu_cmn_send_smc_msg_with_param(smu,
(max ? SMU_MSG_SetSoftMaxByFreq : SMU_MSG_SetSoftMinByFreq),
(PPCLK_SOCCLK << 16) | (freq & 0xffff),
NULL);
@@ -1358,7 +1358,7 @@ static int arcturus_set_power_profile_mode(struct smu_context *smu,
return -EINVAL;
}
-ret = smu_send_smc_msg_with_param(smu,
+ret = smu_cmn_send_smc_msg_with_param(smu,
SMU_MSG_SetWorkloadMask,
1 << workload_type,
NULL);
@@ -2134,8 +2134,8 @@ static void arcturus_get_unique_id(struct smu_context *smu)
}
/* Get the SN to turn into a Unique ID */
-smu_send_smc_msg(smu, SMU_MSG_ReadSerialNumTop32, &top32);
-smu_send_smc_msg(smu, SMU_MSG_ReadSerialNumBottom32, &bottom32);
+smu_cmn_send_smc_msg(smu, SMU_MSG_ReadSerialNumTop32, &top32);
+smu_cmn_send_smc_msg(smu, SMU_MSG_ReadSerialNumBottom32, &bottom32);
id = ((uint64_t)bottom32 << 32) | top32;
adev->unique_id = id;
@@ -2175,7 +2175,7 @@ static int arcturus_set_df_cstate(struct smu_context *smu,
return -EINVAL;
}
-return smu_send_smc_msg_with_param(smu, SMU_MSG_DFCstateControl, state, NULL);
+return smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_DFCstateControl, state, NULL);
}
static int arcturus_allow_xgmi_power_down(struct smu_context *smu, bool en)
@@ -2196,12 +2196,12 @@ static int arcturus_allow_xgmi_power_down(struct smu_context *smu, bool en)
}
if (en)
-return smu_send_smc_msg_with_param(smu,
+return smu_cmn_send_smc_msg_with_param(smu,
SMU_MSG_GmiPwrDnControl,
1,
NULL);
-return smu_send_smc_msg_with_param(smu,
+return smu_cmn_send_smc_msg_with_param(smu,
SMU_MSG_GmiPwrDnControl,
0,
NULL);
@@ -2294,7 +2294,8 @@ static const struct pptable_funcs arcturus_ppt_funcs = {
.set_tool_table_location = smu_v11_0_set_tool_table_location,
.notify_memory_pool_location = smu_v11_0_notify_memory_pool_location,
.system_features_control = smu_v11_0_system_features_control,
-.send_smc_msg_with_param = smu_v11_0_send_msg_with_param,
+.send_smc_msg_with_param = smu_cmn_send_smc_msg_with_param,
+.send_smc_msg = smu_cmn_send_smc_msg,
.init_display_count = NULL,
.set_allowed_mask = smu_v11_0_set_allowed_mask,
.get_enabled_mask = smu_cmn_get_enabled_mask,
...
@@ -528,6 +528,9 @@ struct pptable_funcs {
int (*system_features_control)(struct smu_context *smu, bool en);
int (*send_smc_msg_with_param)(struct smu_context *smu,
enum smu_message_type msg, uint32_t param, uint32_t *read_arg);
+int (*send_smc_msg)(struct smu_context *smu,
+enum smu_message_type msg,
+uint32_t *read_arg);
int (*init_display_count)(struct smu_context *smu, uint32_t count);
int (*set_allowed_mask)(struct smu_context *smu);
int (*get_enabled_mask)(struct smu_context *smu, uint32_t *feature_mask, uint32_t num);
@@ -763,7 +766,6 @@ enum amd_dpm_forced_level smu_get_performance_level(struct smu_context *smu);
int smu_force_performance_level(struct smu_context *smu, enum amd_dpm_forced_level level);
int smu_set_display_count(struct smu_context *smu, uint32_t count);
int smu_set_ac_dc(struct smu_context *smu);
-const char *smu_get_message_name(struct smu_context *smu, enum smu_message_type type);
size_t smu_sys_get_pp_feature_mask(struct smu_context *smu, char *buf);
int smu_sys_set_pp_feature_mask(struct smu_context *smu, uint64_t new_mask);
int smu_force_clk_levels(struct smu_context *smu,
...
@@ -165,12 +165,6 @@ int smu_v11_0_notify_memory_pool_location(struct smu_context *smu);
int smu_v11_0_system_features_control(struct smu_context *smu,
bool en);
-int
-smu_v11_0_send_msg_with_param(struct smu_context *smu,
-enum smu_message_type msg,
-uint32_t param,
-uint32_t *read_arg);
int smu_v11_0_init_display_count(struct smu_context *smu, uint32_t count);
int smu_v11_0_set_allowed_mask(struct smu_context *smu);
...
@@ -31,17 +31,6 @@
#define MP1_Public 0x03b00000
#define MP1_SRAM 0x03c00004
-int smu_v12_0_send_msg_without_waiting(struct smu_context *smu,
-uint16_t msg);
-int smu_v12_0_wait_for_response(struct smu_context *smu);
-int
-smu_v12_0_send_msg_with_param(struct smu_context *smu,
-enum smu_message_type msg,
-uint32_t param,
-uint32_t *read_arg);
int smu_v12_0_check_fw_status(struct smu_context *smu);
int smu_v12_0_check_fw_version(struct smu_context *smu);
...
@@ -786,14 +786,14 @@ static int navi10_dpm_set_vcn_enable(struct smu_context *smu, bool enable)
if (enable) {
/* vcn dpm on is a prerequisite for vcn power gate messages */
if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT)) {
-ret = smu_send_smc_msg_with_param(smu, SMU_MSG_PowerUpVcn, 1, NULL);
+ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_PowerUpVcn, 1, NULL);
if (ret)
return ret;
}
power_gate->vcn_gated = false;
} else {
if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT)) {
-ret = smu_send_smc_msg(smu, SMU_MSG_PowerDownVcn, NULL);
+ret = smu_cmn_send_smc_msg(smu, SMU_MSG_PowerDownVcn, NULL);
if (ret)
return ret;
}
@@ -811,14 +811,14 @@ static int navi10_dpm_set_jpeg_enable(struct smu_context *smu, bool enable)
if (enable) {
if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_JPEG_PG_BIT)) {
-ret = smu_send_smc_msg(smu, SMU_MSG_PowerUpJpeg, NULL);
+ret = smu_cmn_send_smc_msg(smu, SMU_MSG_PowerUpJpeg, NULL);
if (ret)
return ret;
}
power_gate->jpeg_gated = false;
} else {
if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_JPEG_PG_BIT)) {
-ret = smu_send_smc_msg(smu, SMU_MSG_PowerDownJpeg, NULL);
+ret = smu_cmn_send_smc_msg(smu, SMU_MSG_PowerDownJpeg, NULL);
if (ret)
return ret;
}
@@ -1258,7 +1258,7 @@ static int navi10_pre_display_config_changed(struct smu_context *smu)
int ret = 0;
uint32_t max_freq = 0;
-ret = smu_send_smc_msg_with_param(smu, SMU_MSG_NumOfDisplays, 0, NULL);
+ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_NumOfDisplays, 0, NULL);
if (ret)
return ret;
@@ -1281,7 +1281,7 @@ static int navi10_display_config_changed(struct smu_context *smu)
if ((smu->watermarks_bitmap & WATERMARKS_EXIST) &&
smu_cmn_feature_is_supported(smu, SMU_FEATURE_DPM_DCEFCLK_BIT) &&
smu_cmn_feature_is_supported(smu, SMU_FEATURE_DPM_SOCCLK_BIT)) {
-ret = smu_send_smc_msg_with_param(smu, SMU_MSG_NumOfDisplays,
+ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_NumOfDisplays,
smu->display_config->num_display,
NULL);
if (ret)
@@ -1541,7 +1541,7 @@ static int navi10_set_power_profile_mode(struct smu_context *smu, long *input, u
smu->power_profile_mode);
if (workload_type < 0)
return -EINVAL;
-smu_send_smc_msg_with_param(smu, SMU_MSG_SetWorkloadMask,
+smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetWorkloadMask,
1 << workload_type, NULL);
return ret;
@@ -1564,7 +1564,7 @@ static int navi10_notify_smc_display_config(struct smu_context *smu)
ret = smu_v11_0_display_clock_voltage_request(smu, &clock_req);
if (!ret) {
if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_DS_DCEFCLK_BIT)) {
-ret = smu_send_smc_msg_with_param(smu,
+ret = smu_cmn_send_smc_msg_with_param(smu,
SMU_MSG_SetMinDeepSleepDcefclk,
min_clocks.dcef_clock_in_sr/100,
NULL);
@@ -1884,7 +1884,7 @@ static int navi10_update_pcie_parameters(struct smu_context *smu,
((pptable->PcieGenSpeed[i] <= pcie_gen_cap) ? (pptable->PcieGenSpeed[i] << 8) :
(pcie_gen_cap << 8)) | ((pptable->PcieLaneCount[i] <= pcie_width_cap) ?
pptable->PcieLaneCount[i] : pcie_width_cap);
-ret = smu_send_smc_msg_with_param(smu,
+ret = smu_cmn_send_smc_msg_with_param(smu,
SMU_MSG_OverridePcieParameters,
smu_pcie_arg,
NULL);
@@ -1936,7 +1936,7 @@ static int navi10_overdrive_get_gfx_clk_base_voltage(struct smu_context *smu,
uint32_t value = 0;
int ret;
-ret = smu_send_smc_msg_with_param(smu,
+ret = smu_cmn_send_smc_msg_with_param(smu,
SMU_MSG_GetVoltageByDpm,
param,
&value);
@@ -2184,7 +2184,7 @@ static int navi10_run_btc(struct smu_context *smu)
{
int ret = 0;
-ret = smu_send_smc_msg(smu, SMU_MSG_RunBtc, NULL);
+ret = smu_cmn_send_smc_msg(smu, SMU_MSG_RunBtc, NULL);
if (ret)
dev_err(smu->adev->dev, "RunBtc failed!\n");
@@ -2196,9 +2196,9 @@ static int navi10_dummy_pstate_control(struct smu_context *smu, bool enable)
int result = 0;
if (!enable)
-result = smu_send_smc_msg(smu, SMU_MSG_DAL_DISABLE_DUMMY_PSTATE_CHANGE, NULL);
+result = smu_cmn_send_smc_msg(smu, SMU_MSG_DAL_DISABLE_DUMMY_PSTATE_CHANGE, NULL);
else
-result = smu_send_smc_msg(smu, SMU_MSG_DAL_ENABLE_DUMMY_PSTATE_CHANGE, NULL);
+result = smu_cmn_send_smc_msg(smu, SMU_MSG_DAL_ENABLE_DUMMY_PSTATE_CHANGE, NULL);
return result;
}
@@ -2305,7 +2305,8 @@ static const struct pptable_funcs navi10_ppt_funcs = {
.set_tool_table_location = smu_v11_0_set_tool_table_location,
.notify_memory_pool_location = smu_v11_0_notify_memory_pool_location,
.system_features_control = smu_v11_0_system_features_control,
-.send_smc_msg_with_param = smu_v11_0_send_msg_with_param,
+.send_smc_msg_with_param = smu_cmn_send_smc_msg_with_param,
+.send_smc_msg = smu_cmn_send_smc_msg,
.init_display_count = smu_v11_0_init_display_count,
.set_allowed_mask = smu_v11_0_set_allowed_mask,
.get_enabled_mask = smu_cmn_get_enabled_mask,
...
@@ -295,7 +295,7 @@ static int renoir_get_dpm_ultimate_freq(struct smu_context *smu,
switch (clk_type) {
case SMU_GFXCLK:
case SMU_SCLK:
-ret = smu_send_smc_msg(smu, SMU_MSG_GetMaxGfxclkFrequency, max);
+ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetMaxGfxclkFrequency, max);
if (ret) {
dev_err(smu->adev->dev, "Attempt to get max GX frequency from SMC Failed !\n");
goto failed;
@@ -323,7 +323,7 @@ static int renoir_get_dpm_ultimate_freq(struct smu_context *smu,
switch (clk_type) {
case SMU_GFXCLK:
case SMU_SCLK:
-ret = smu_send_smc_msg(smu, SMU_MSG_GetMinGfxclkFrequency, min);
+ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetMinGfxclkFrequency, min);
if (ret) {
dev_err(smu->adev->dev, "Attempt to get min GX frequency from SMC Failed !\n");
goto failed;
@@ -465,14 +465,14 @@ static int renoir_dpm_set_vcn_enable(struct smu_context *smu, bool enable)
if (enable) {
/* vcn dpm on is a prerequisite for vcn power gate messages */
if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT)) {
-ret = smu_send_smc_msg_with_param(smu, SMU_MSG_PowerUpVcn, 0, NULL);
+ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_PowerUpVcn, 0, NULL);
if (ret)
return ret;
}
power_gate->vcn_gated = false;
} else {
if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT)) {
-ret = smu_send_smc_msg(smu, SMU_MSG_PowerDownVcn, NULL);
+ret = smu_cmn_send_smc_msg(smu, SMU_MSG_PowerDownVcn, NULL);
if (ret)
return ret;
}
@@ -490,14 +490,14 @@ static int renoir_dpm_set_jpeg_enable(struct smu_context *smu, bool enable)
if (enable) {
if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_JPEG_PG_BIT)) {
-ret = smu_send_smc_msg_with_param(smu, SMU_MSG_PowerUpJpeg, 0, NULL);
+ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_PowerUpJpeg, 0, NULL);
if (ret)
return ret;
}
power_gate->jpeg_gated = false;
} else {
if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_JPEG_PG_BIT)) {
-ret = smu_send_smc_msg_with_param(smu, SMU_MSG_PowerDownJpeg, 0, NULL);
+ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_PowerDownJpeg, 0, NULL);
if (ret)
return ret;
}
@@ -688,13 +688,13 @@ static int renoir_force_clk_levels(struct smu_context *smu,
ret = renoir_get_dpm_ultimate_freq(smu, SMU_GFXCLK, &min_freq, &max_freq);
if (ret)
return ret;
-ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxGfxClk,
+ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxGfxClk,
soft_max_level == 0 ? min_freq :
soft_max_level == 1 ? RENOIR_UMD_PSTATE_GFXCLK : max_freq,
NULL);
if (ret)
return ret;
-ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinGfxClk,
+ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinGfxClk,
soft_min_level == 2 ? max_freq :
soft_min_level == 1 ? RENOIR_UMD_PSTATE_GFXCLK : min_freq,
NULL);
@@ -708,10 +708,10 @@ static int renoir_force_clk_levels(struct smu_context *smu,
ret = renoir_get_dpm_clk_limited(smu, clk_type, soft_max_level, &max_freq);
if (ret)
return ret;
-ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxSocclkByFreq, max_freq, NULL);
+ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxSocclkByFreq, max_freq, NULL);
if (ret)
return ret;
-ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinSocclkByFreq, min_freq, NULL);
+ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinSocclkByFreq, min_freq, NULL);
if (ret)
return ret;
break;
@@ -723,10 +723,10 @@ static int renoir_force_clk_levels(struct smu_context *smu,
ret = renoir_get_dpm_clk_limited(smu, clk_type, soft_max_level, &max_freq);
if (ret)
return ret;
-ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxFclkByFreq, max_freq, NULL);
+ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxFclkByFreq, max_freq, NULL);
if (ret)
return ret;
-ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinFclkByFreq, min_freq, NULL);
+ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinFclkByFreq, min_freq, NULL);
if (ret)
return ret;
break;
@@ -760,7 +760,7 @@ static int renoir_set_power_profile_mode(struct smu_context *smu, long *input, u
return -EINVAL;
}
-ret = smu_send_smc_msg_with_param(smu, SMU_MSG_ActiveProcessNotify,
+ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_ActiveProcessNotify,
1 << workload_type,
NULL);
if (ret) {
@@ -1010,7 +1010,8 @@ static const struct pptable_funcs renoir_ppt_funcs = {
.check_fw_status = smu_v12_0_check_fw_status,
.check_fw_version = smu_v12_0_check_fw_version,
.powergate_sdma = smu_v12_0_powergate_sdma,
-.send_smc_msg_with_param = smu_v12_0_send_msg_with_param,
+.send_smc_msg_with_param = smu_cmn_send_smc_msg_with_param,
+.send_smc_msg = smu_cmn_send_smc_msg,
.set_gfx_cgpg = smu_v12_0_set_gfx_cgpg,
.gfx_off_control = smu_v12_0_gfx_off_control,
.get_gfx_off_status = smu_v12_0_get_gfxoff_status,
...
@@ -756,11 +756,11 @@ static int sienna_cichlid_dpm_set_vcn_enable(struct smu_context *smu, bool enabl
if (enable) {
/* vcn dpm on is a prerequisite for vcn power gate messages */
if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_MM_DPM_PG_BIT)) {
-ret = smu_send_smc_msg_with_param(smu, SMU_MSG_PowerUpVcn, 0, NULL);
+ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_PowerUpVcn, 0, NULL);
if (ret)
return ret;
if (adev->asic_type == CHIP_SIENNA_CICHLID) {
-ret = smu_send_smc_msg_with_param(smu, SMU_MSG_PowerUpVcn,
+ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_PowerUpVcn,
0x10000, NULL);
if (ret)
return ret;
@@ -769,11 +769,11 @@ static int sienna_cichlid_dpm_set_vcn_enable(struct smu_context *smu, bool enabl
power_gate->vcn_gated = false;
} else {
if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_MM_DPM_PG_BIT)) {
-ret = smu_send_smc_msg_with_param(smu, SMU_MSG_PowerDownVcn, 0, NULL);
+ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_PowerDownVcn, 0, NULL);
if (ret)
return ret;
if (adev->asic_type == CHIP_SIENNA_CICHLID) {
-ret = smu_send_smc_msg_with_param(smu, SMU_MSG_PowerDownVcn,
+ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_PowerDownVcn,
0x10000, NULL);
if (ret)
return ret;
@@ -793,14 +793,14 @@ static int sienna_cichlid_dpm_set_jpeg_enable(struct smu_context *smu, bool enab
if (enable) {
if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_MM_DPM_PG_BIT)) {
-ret = smu_send_smc_msg_with_param(smu, SMU_MSG_PowerUpJpeg, 0, NULL);
+ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_PowerUpJpeg, 0, NULL);
if (ret)
return ret;
}
power_gate->jpeg_gated = false;
} else {
if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_MM_DPM_PG_BIT)) {
-ret = smu_send_smc_msg_with_param(smu, SMU_MSG_PowerDownJpeg, 0, NULL);
+ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_PowerDownJpeg, 0, NULL);
if (ret)
return ret;
}
@@ -1058,7 +1058,7 @@ static int sienna_cichlid_pre_display_config_changed(struct smu_context *smu)
/* Sienna_Cichlid do not support to change display num currently */
return 0;
#if 0
-ret = smu_send_smc_msg_with_param(smu, SMU_MSG_NumOfDisplays, 0, NULL);
+ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_NumOfDisplays, 0, NULL);
if (ret)
return ret;
#endif
@@ -1083,7 +1083,7 @@ static int sienna_cichlid_display_config_changed(struct smu_context *smu)
smu_cmn_feature_is_supported(smu, SMU_FEATURE_DPM_DCEFCLK_BIT) &&
smu_cmn_feature_is_supported(smu, SMU_FEATURE_DPM_SOCCLK_BIT)) {
#if 0
-ret = smu_send_smc_msg_with_param(smu, SMU_MSG_NumOfDisplays,
+ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_NumOfDisplays,
smu->display_config->num_display,
NULL);
#endif
@@ -1344,7 +1344,7 @@ static int sienna_cichlid_set_power_profile_mode(struct smu_context *smu, long *
smu->power_profile_mode);
if (workload_type < 0)
return -EINVAL;
-smu_send_smc_msg_with_param(smu, SMU_MSG_SetWorkloadMask,
+smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetWorkloadMask,
1 << workload_type, NULL);
return ret;
@@ -1367,7 +1367,7 @@ static int sienna_cichlid_notify_smc_display_config(struct smu_context *smu)
ret = smu_v11_0_display_clock_voltage_request(smu, &clock_req);
if (!ret) {
if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_DS_DCEFCLK_BIT)) {
-ret = smu_send_smc_msg_with_param(smu,
+ret = smu_cmn_send_smc_msg_with_param(smu,
SMU_MSG_SetMinDeepSleepDcefclk,
min_clocks.dcef_clock_in_sr/100,
NULL);
@@ -1689,7 +1689,7 @@ static int sienna_cichlid_update_pcie_parameters(struct smu_context *smu,
pptable->PcieLaneCount[i] :
pcie_width_cap);
-ret = smu_send_smc_msg_with_param(smu,
+ret = smu_cmn_send_smc_msg_with_param(smu,
SMU_MSG_OverridePcieParameters,
smu_pcie_arg,
NULL);
@@ -2457,7 +2457,8 @@ static const struct pptable_funcs sienna_cichlid_ppt_funcs = {
.set_tool_table_location = smu_v11_0_set_tool_table_location,
.notify_memory_pool_location = smu_v11_0_notify_memory_pool_location,
.system_features_control = smu_v11_0_system_features_control,
-.send_smc_msg_with_param = smu_v11_0_send_msg_with_param,
+.send_smc_msg_with_param = smu_cmn_send_smc_msg_with_param,
+.send_smc_msg = smu_cmn_send_smc_msg,
.init_display_count = NULL,
.set_allowed_mask = smu_v11_0_set_allowed_mask,
.get_enabled_mask = smu_cmn_get_enabled_mask,
...
@@ -24,6 +24,7 @@
#include "amdgpu_smu.h"
#include "smu_cmn.h"
#include "smu_internal.h"
+#include "soc15_common.h"
/*
* DO NOT use these for err/warn/info/debug messages.
@@ -35,6 +36,126 @@
#undef pr_info
#undef pr_debug
+/*
+ * Although these are defined in each ASIC's specific header file.
+ * They share the same definitions and values. That makes common
+ * APIs for SMC messages issuing for all ASICs possible.
+ */
+#define mmMP1_SMN_C2PMSG_66 0x0282
+#define mmMP1_SMN_C2PMSG_66_BASE_IDX 0
+#define mmMP1_SMN_C2PMSG_82 0x0292
+#define mmMP1_SMN_C2PMSG_82_BASE_IDX 0
+#define mmMP1_SMN_C2PMSG_90 0x029a
+#define mmMP1_SMN_C2PMSG_90_BASE_IDX 0
+#define MP1_C2PMSG_90__CONTENT_MASK 0xFFFFFFFFL
+#undef __SMU_DUMMY_MAP
+#define __SMU_DUMMY_MAP(type) #type
+static const char* __smu_message_names[] = {
+SMU_MESSAGE_TYPES
+};
+static const char *smu_get_message_name(struct smu_context *smu,
+enum smu_message_type type)
+{
+if (type < 0 || type >= SMU_MSG_MAX_COUNT)
+return "unknown smu message";
+return __smu_message_names[type];
+}
+static void smu_cmn_send_msg_without_waiting(struct smu_context *smu,
+uint16_t msg)
+{
+struct amdgpu_device *adev = smu->adev;
+WREG32_SOC15_NO_KIQ(MP1, 0, mmMP1_SMN_C2PMSG_66, msg);
+}
+static void smu_cmn_read_arg(struct smu_context *smu,
+uint32_t *arg)
+{
+struct amdgpu_device *adev = smu->adev;
+*arg = RREG32_SOC15_NO_KIQ(MP1, 0, mmMP1_SMN_C2PMSG_82);
+}
+static int smu_cmn_wait_for_response(struct smu_context *smu)
+{
+struct amdgpu_device *adev = smu->adev;
+uint32_t cur_value, i, timeout = adev->usec_timeout * 10;
+for (i = 0; i < timeout; i++) {
+cur_value = RREG32_SOC15_NO_KIQ(MP1, 0, mmMP1_SMN_C2PMSG_90);
+if ((cur_value & MP1_C2PMSG_90__CONTENT_MASK) != 0)
+return cur_value == 0x1 ? 0 : -EIO;
+udelay(1);
+}
+/* timeout means wrong logic */
+if (i == timeout)
+return -ETIME;
+return RREG32_SOC15_NO_KIQ(MP1, 0, mmMP1_SMN_C2PMSG_90) == 0x1 ? 0 : -EIO;
+}
+int smu_cmn_send_smc_msg_with_param(struct smu_context *smu,
+enum smu_message_type msg,
+uint32_t param,
+uint32_t *read_arg)
+{
+struct amdgpu_device *adev = smu->adev;
+int ret = 0, index = 0;
+index = smu_cmn_to_asic_specific_index(smu,
+CMN2ASIC_MAPPING_MSG,
+msg);
+if (index < 0)
+return index == -EACCES ? 0 : index;
+mutex_lock(&smu->message_lock);
+ret = smu_cmn_wait_for_response(smu);
+if (ret) {
+dev_err(adev->dev, "Msg issuing pre-check failed and "
+"SMU may be not in the right state!\n");
+goto out;
+}
+WREG32_SOC15_NO_KIQ(MP1, 0, mmMP1_SMN_C2PMSG_90, 0);
+WREG32_SOC15_NO_KIQ(MP1, 0, mmMP1_SMN_C2PMSG_82, param);
+smu_cmn_send_msg_without_waiting(smu, (uint16_t)index);
+ret = smu_cmn_wait_for_response(smu);
+if (ret) {
+dev_err(adev->dev, "failed send message: %10s (%d) \tparam: 0x%08x response %#x\n",
+smu_get_message_name(smu, msg), index, param, ret);
+goto out;
+}
+if (read_arg)
+smu_cmn_read_arg(smu, read_arg);
+out:
+mutex_unlock(&smu->message_lock);
+return ret;
+}
+int smu_cmn_send_smc_msg(struct smu_context *smu,
+enum smu_message_type msg,
+uint32_t *read_arg)
+{
+return smu_cmn_send_smc_msg_with_param(smu,
+msg,
+0,
+read_arg);
+}
int smu_cmn_to_asic_specific_index(struct smu_context *smu,
enum smu_cmn2asic_mapping_type type,
uint32_t index)
@@ -203,11 +324,11 @@ int smu_cmn_get_enabled_mask(struct smu_context *smu,
return -EINVAL;
if (bitmap_empty(feature->enabled, feature->feature_num)) {
-ret = smu_send_smc_msg(smu, SMU_MSG_GetEnabledSmuFeaturesHigh, &feature_mask_high);
+ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetEnabledSmuFeaturesHigh, &feature_mask_high);
if (ret)
return ret;
-ret = smu_send_smc_msg(smu, SMU_MSG_GetEnabledSmuFeaturesLow, &feature_mask_low);
+ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetEnabledSmuFeaturesLow, &feature_mask_low);
if (ret)
return ret;
@@ -229,26 +350,26 @@ static int smu_cmn_feature_update_enable_state(struct smu_context *smu,
int ret = 0;
if (enabled) {
-ret = smu_send_smc_msg_with_param(smu,
+ret = smu_cmn_send_smc_msg_with_param(smu,
SMU_MSG_EnableSmuFeaturesLow,
lower_32_bits(feature_mask),
NULL);
if (ret)
return ret;
-ret = smu_send_smc_msg_with_param(smu,
+ret = smu_cmn_send_smc_msg_with_param(smu,
SMU_MSG_EnableSmuFeaturesHigh,
upper_32_bits(feature_mask),
NULL);
if (ret)
return ret;
} else {
-ret = smu_send_smc_msg_with_param(smu,
+ret = smu_cmn_send_smc_msg_with_param(smu,
SMU_MSG_DisableSmuFeaturesLow,
lower_32_bits(feature_mask),
NULL);
if (ret)
return ret;
-ret = smu_send_smc_msg_with_param(smu,
+ret = smu_cmn_send_smc_msg_with_param(smu,
SMU_MSG_DisableSmuFeaturesHigh,
upper_32_bits(feature_mask),
NULL);
@@ -423,7 +544,7 @@ int smu_cmn_get_smc_version(struct smu_context *smu,
}
if (if_version) {
-ret = smu_send_smc_msg(smu, SMU_MSG_GetDriverIfVersion, if_version);
+ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetDriverIfVersion, if_version);
if (ret)
return ret;
@@ -431,7 +552,7 @@ int smu_cmn_get_smc_version(struct smu_context *smu,
}
if (smu_version) {
-ret = smu_send_smc_msg(smu, SMU_MSG_GetSmuVersion, smu_version);
+ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetSmuVersion, smu_version);
if (ret)
return ret;
@@ -469,7 +590,7 @@ int smu_cmn_update_table(struct smu_context *smu,
amdgpu_asic_flush_hdp(adev, NULL);
}
-ret = smu_send_smc_msg_with_param(smu, drv2smu ?
+ret = smu_cmn_send_smc_msg_with_param(smu, drv2smu ?
SMU_MSG_TransferTableDram2Smu :
SMU_MSG_TransferTableSmu2Dram,
table_id | ((argument & 0xFFFF) << 16),
...
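
As a usage sketch (illustrative, not part of the patch): a caller that needs the SMU's 32-bit response passes a non-NULL read_arg, which smu_cmn_send_smc_msg_with_param() fills from MP1_SMN_C2PMSG_82 once the response register reads back 0x1. Querying the firmware version, as smu_cmn_get_smc_version() does above, would look like this (the wrapper name is hypothetical):

/* Sketch: retrieve a value from the SMU through the common helper.
 * Mirrors the SMU_MSG_GetSmuVersion call in smu_cmn_get_smc_version(). */
static int example_read_smu_version(struct smu_context *smu, uint32_t *smu_version)
{
        /* The parameter defaults to 0; the response lands in *smu_version. */
        return smu_cmn_send_smc_msg(smu, SMU_MSG_GetSmuVersion, smu_version);
}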
@@ -25,6 +25,15 @@
#include "amdgpu_smu.h"
+int smu_cmn_send_smc_msg_with_param(struct smu_context *smu,
+enum smu_message_type msg,
+uint32_t param,
+uint32_t *read_arg);
+int smu_cmn_send_smc_msg(struct smu_context *smu,
+enum smu_message_type msg,
+uint32_t *read_arg);
int smu_cmn_to_asic_specific_index(struct smu_context *smu,
enum smu_cmn2asic_mapping_type type,
uint32_t index);
...
@@ -50,7 +50,7 @@
#define smu_init_max_sustainable_clocks(smu) smu_ppt_funcs(init_max_sustainable_clocks, 0, smu)
#define smu_set_default_od_settings(smu) smu_ppt_funcs(set_default_od_settings, 0, smu)
#define smu_send_smc_msg_with_param(smu, msg, param, read_arg) smu_ppt_funcs(send_smc_msg_with_param, 0, smu, msg, param, read_arg)
-#define smu_send_smc_msg(smu, msg, read_arg) smu_ppt_funcs(send_smc_msg_with_param, 0, smu, msg, 0, read_arg)
+#define smu_send_smc_msg(smu, msg, read_arg) smu_ppt_funcs(send_smc_msg, 0, smu, msg, read_arg)
#define smu_init_display_count(smu, count) smu_ppt_funcs(init_display_count, 0, smu, count)
#define smu_feature_set_allowed_mask(smu) smu_ppt_funcs(set_allowed_mask, 0, smu)
#define smu_feature_get_enabled_mask(smu, mask, num) smu_ppt_funcs(get_enabled_mask, 0, smu, mask, num)
...
@@ -67,91 +67,6 @@ MODULE_FIRMWARE("amdgpu/navy_flounder_smc.bin");
#define SMU11_MODE1_RESET_WAIT_TIME_IN_MS 500 //500ms
-static int smu_v11_0_send_msg_without_waiting(struct smu_context *smu,
-uint16_t msg)
-{
-struct amdgpu_device *adev = smu->adev;
-WREG32_SOC15_NO_KIQ(MP1, 0, mmMP1_SMN_C2PMSG_66, msg);
-return 0;
-}
-static int smu_v11_0_read_arg(struct smu_context *smu, uint32_t *arg)
-{
-struct amdgpu_device *adev = smu->adev;
-*arg = RREG32_SOC15_NO_KIQ(MP1, 0, mmMP1_SMN_C2PMSG_82);
-return 0;
-}
-static int smu_v11_0_wait_for_response(struct smu_context *smu)
-{
-struct amdgpu_device *adev = smu->adev;
-uint32_t cur_value, i, timeout = adev->usec_timeout * 10;
-for (i = 0; i < timeout; i++) {
-cur_value = RREG32_SOC15_NO_KIQ(MP1, 0, mmMP1_SMN_C2PMSG_90);
-if ((cur_value & MP1_C2PMSG_90__CONTENT_MASK) != 0)
-return cur_value == 0x1 ? 0 : -EIO;
-udelay(1);
-}
-/* timeout means wrong logic */
-if (i == timeout)
-return -ETIME;
-return RREG32_SOC15_NO_KIQ(MP1, 0, mmMP1_SMN_C2PMSG_90) == 0x1 ? 0 : -EIO;
-}
-int
-smu_v11_0_send_msg_with_param(struct smu_context *smu,
-enum smu_message_type msg,
-uint32_t param,
-uint32_t *read_arg)
-{
-struct amdgpu_device *adev = smu->adev;
-int ret = 0, index = 0;
-index = smu_cmn_to_asic_specific_index(smu,
-CMN2ASIC_MAPPING_MSG,
-msg);
-if (index < 0)
-return index == -EACCES ? 0 : index;
-mutex_lock(&smu->message_lock);
-ret = smu_v11_0_wait_for_response(smu);
-if (ret) {
-dev_err(adev->dev, "Msg issuing pre-check failed and "
-"SMU may be not in the right state!\n");
-goto out;
-}
-WREG32_SOC15_NO_KIQ(MP1, 0, mmMP1_SMN_C2PMSG_90, 0);
-WREG32_SOC15_NO_KIQ(MP1, 0, mmMP1_SMN_C2PMSG_82, param);
-smu_v11_0_send_msg_without_waiting(smu, (uint16_t)index);
-ret = smu_v11_0_wait_for_response(smu);
-if (ret) {
-dev_err(adev->dev, "failed send message: %10s (%d) \tparam: 0x%08x response %#x\n",
-smu_get_message_name(smu, msg), index, param, ret);
-goto out;
-}
-if (read_arg) {
-ret = smu_v11_0_read_arg(smu, read_arg);
-if (ret) {
-dev_err(adev->dev, "failed to read message arg: %10s (%d) \tparam: 0x%08x response %#x\n",
-smu_get_message_name(smu, msg), index, param, ret);
-goto out;
-}
-}
-out:
-mutex_unlock(&smu->message_lock);
-return ret;
-}
int smu_v11_0_init_microcode(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
@@ -683,13 +598,13 @@ int smu_v11_0_notify_memory_pool_location(struct smu_context *smu)
address_high = (uint32_t)upper_32_bits(address);
address_low = (uint32_t)lower_32_bits(address);
-ret = smu_send_smc_msg_with_param(smu,
+ret = smu_cmn_send_smc_msg_with_param(smu,
SMU_MSG_SetSystemVirtualDramAddrHigh,
address_high,
NULL);
if (ret)
return ret;
-ret = smu_send_smc_msg_with_param(smu,
+ret = smu_cmn_send_smc_msg_with_param(smu,
SMU_MSG_SetSystemVirtualDramAddrLow,
address_low,
NULL);
@@ -700,15 +615,15 @@ int smu_v11_0_notify_memory_pool_location(struct smu_context *smu)
address_high = (uint32_t)upper_32_bits(address);
address_low = (uint32_t)lower_32_bits(address);
-ret = smu_send_smc_msg_with_param(smu, SMU_MSG_DramLogSetDramAddrHigh,
+ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_DramLogSetDramAddrHigh,
address_high, NULL);
if (ret)
return ret;
-ret = smu_send_smc_msg_with_param(smu, SMU_MSG_DramLogSetDramAddrLow,
+ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_DramLogSetDramAddrLow,
address_low, NULL);
if (ret)
return ret;
-ret = smu_send_smc_msg_with_param(smu, SMU_MSG_DramLogSetDramSize,
+ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_DramLogSetDramSize,
(uint32_t)memory_pool->size, NULL);
if (ret)
return ret;
@@ -720,7 +635,7 @@ int smu_v11_0_set_min_deep_sleep_dcefclk(struct smu_context *smu, uint32_t clk)
{
int ret;
-ret = smu_send_smc_msg_with_param(smu,
+ret = smu_cmn_send_smc_msg_with_param(smu,
SMU_MSG_SetMinDeepSleepDcefclk, clk, NULL);
if (ret)
dev_err(smu->adev->dev, "SMU11 attempt to set divider for DCEFCLK Failed!");
@@ -734,12 +649,12 @@ int smu_v11_0_set_driver_table_location(struct smu_context *smu)
int ret = 0;
if (driver_table->mc_address) {
-ret = smu_send_smc_msg_with_param(smu,
+ret = smu_cmn_send_smc_msg_with_param(smu,
SMU_MSG_SetDriverDramAddrHigh,
upper_32_bits(driver_table->mc_address),
NULL);
if (!ret)
-ret = smu_send_smc_msg_with_param(smu,
+ret = smu_cmn_send_smc_msg_with_param(smu,
SMU_MSG_SetDriverDramAddrLow,
lower_32_bits(driver_table->mc_address),
NULL);
@@ -754,12 +669,12 @@ int smu_v11_0_set_tool_table_location(struct smu_context *smu)
struct smu_table *tool_table = &smu->smu_table.tables[SMU_TABLE_PMSTATUSLOG];
if (tool_table->mc_address) {
-ret = smu_send_smc_msg_with_param(smu,
+ret = smu_cmn_send_smc_msg_with_param(smu,
SMU_MSG_SetToolsDramAddrHigh,
upper_32_bits(tool_table->mc_address),
NULL);
if (!ret)
-ret = smu_send_smc_msg_with_param(smu,
+ret = smu_cmn_send_smc_msg_with_param(smu,
SMU_MSG_SetToolsDramAddrLow,
lower_32_bits(tool_table->mc_address),
NULL);
@@ -780,7 +695,7 @@ int smu_v11_0_init_display_count(struct smu_context *smu, uint32_t count)
if (!smu->pm_enabled)
return ret;
-ret = smu_send_smc_msg_with_param(smu, SMU_MSG_NumOfDisplays, count, NULL);
+ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_NumOfDisplays, count, NULL);
return ret;
}
@@ -797,12 +712,12 @@ int smu_v11_0_set_allowed_mask(struct smu_context *smu)
bitmap_copy((unsigned long *)feature_mask, feature->allowed, 64);
-ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetAllowedFeaturesMaskHigh,
+ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetAllowedFeaturesMaskHigh,
feature_mask[1], NULL);
if (ret)
goto failed;
-ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetAllowedFeaturesMaskLow,
+ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetAllowedFeaturesMaskLow,
feature_mask[0], NULL);
if (ret)
goto failed;
@@ -819,7 +734,7 @@ int smu_v11_0_system_features_control(struct smu_context *smu,
uint32_t feature_mask[2];
int ret = 0;
-ret = smu_send_smc_msg(smu, (en ? SMU_MSG_EnableAllSmuFeatures :
+ret = smu_cmn_send_smc_msg(smu, (en ? SMU_MSG_EnableAllSmuFeatures :
SMU_MSG_DisableAllSmuFeatures), NULL);
if (ret)
return ret;
@@ -850,7 +765,7 @@ int smu_v11_0_notify_display_change(struct smu_context *smu)
if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT) &&
smu->adev->gmc.vram_type == AMDGPU_VRAM_TYPE_HBM)
-ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetUclkFastSwitch, 1, NULL);
+ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetUclkFastSwitch, 1, NULL);
return ret;
}
@@ -872,7 +787,7 @@ smu_v11_0_get_max_sustainable_clock(struct smu_context *smu, uint32_t *clock,
if (clk_id < 0)
return -EINVAL;
-ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetDcModeMaxDpmFreq,
+ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GetDcModeMaxDpmFreq,
clk_id << 16, clock);
if (ret) {
dev_err(smu->adev->dev, "[GetMaxSustainableClock] Failed to get max DC clock from SMC!");
@@ -883,7 +798,7 @@ smu_v11_0_get_max_sustainable_clock(struct smu_context *smu, uint32_t *clock,
return 0;
/* if DC limit is zero, return AC limit */
-ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetMaxDpmFreq,
+ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GetMaxDpmFreq,
clk_id << 16, clock);
if (ret) {
dev_err(smu->adev->dev, "[GetMaxSustainableClock] failed to get max AC clock from SMC!");
@@ -987,7 +902,7 @@ int smu_v11_0_get_current_power_limit(struct smu_context *smu,
if (power_src < 0)
return -EINVAL;
-ret = smu_send_smc_msg_with_param(smu,
+ret = smu_cmn_send_smc_msg_with_param(smu,
SMU_MSG_GetPptLimit,
power_src << 16,
power_limit);
@@ -1006,7 +921,7 @@ int smu_v11_0_set_power_limit(struct smu_context *smu, uint32_t n)
return -EOPNOTSUPP;
}
-ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetPptLimit, n, NULL);
+ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetPptLimit, n, NULL);
if (ret) {
dev_err(smu->adev->dev, "[%s] Set power limit Failed!\n", __func__);
return ret;
@@ -1117,9 +1032,9 @@ int smu_v11_0_gfx_off_control(struct smu_context *smu, bool enable)
if (!(adev->pm.pp_feature & PP_GFXOFF_MASK))
return 0;
if (enable)
-ret = smu_send_smc_msg(smu, SMU_MSG_AllowGfxOff, NULL);
+ret = smu_cmn_send_smc_msg(smu, SMU_MSG_AllowGfxOff, NULL);
else
-ret = smu_send_smc_msg(smu, SMU_MSG_DisallowGfxOff, NULL);
+ret = smu_cmn_send_smc_msg(smu, SMU_MSG_DisallowGfxOff, NULL);
break;
default:
break;
@@ -1255,7 +1170,7 @@ int smu_v11_0_set_xgmi_pstate(struct smu_context *smu,
uint32_t pstate)
{
int ret = 0;
-ret = smu_send_smc_msg_with_param(smu,
+ret = smu_cmn_send_smc_msg_with_param(smu,
SMU_MSG_SetXgmiMode,
pstate ? XGMI_MODE_PSTATE_D0 : XGMI_MODE_PSTATE_D3,
NULL);
@@ -1329,7 +1244,7 @@ static int smu_v11_0_set_irq_state(struct amdgpu_device *adev,
static int smu_v11_0_ack_ac_dc_interrupt(struct smu_context *smu)
{
-return smu_send_smc_msg(smu,
+return smu_cmn_send_smc_msg(smu,
SMU_MSG_ReenableAcDcInterrupt,
NULL);
}
@@ -1487,14 +1402,14 @@ int smu_v11_0_set_azalia_d3_pme(struct smu_context *smu)
{
int ret = 0;
-ret = smu_send_smc_msg(smu, SMU_MSG_BacoAudioD3PME, NULL);
+ret = smu_cmn_send_smc_msg(smu, SMU_MSG_BacoAudioD3PME, NULL);
return ret;
}
static int smu_v11_0_baco_set_armd3_sequence(struct smu_context *smu, enum smu_v11_0_baco_seq baco_seq)
{
-return smu_send_smc_msg_with_param(smu, SMU_MSG_ArmD3, baco_seq, NULL);
+return smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_ArmD3, baco_seq, NULL);
}
bool smu_v11_0_baco_is_support(struct smu_context *smu)
@@ -1548,12 +1463,12 @@ int smu_v11_0_baco_set_state(struct smu_context *smu, enum smu_baco_state state)
data |= 0x80000000;
WREG32_SOC15(THM, 0, mmTHM_BACO_CNTL, data);
-ret = smu_send_smc_msg_with_param(smu, SMU_MSG_EnterBaco, 0, NULL);
+ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_EnterBaco, 0, NULL);
} else {
-ret = smu_send_smc_msg_with_param(smu, SMU_MSG_EnterBaco, 1, NULL);
+ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_EnterBaco, 1, NULL);
}
} else {
-ret = smu_send_smc_msg(smu, SMU_MSG_ExitBaco, NULL);
+ret = smu_cmn_send_smc_msg(smu, SMU_MSG_ExitBaco, NULL);
if (ret)
goto out;
@@ -1606,7 +1521,7 @@ int smu_v11_0_mode1_reset(struct smu_context *smu)
{
int ret = 0;
-ret = smu_send_smc_msg(smu, SMU_MSG_Mode1Reset, NULL);
+ret = smu_cmn_send_smc_msg(smu, SMU_MSG_Mode1Reset, NULL);
if (!ret)
msleep(SMU11_MODE1_RESET_WAIT_TIME_IN_MS);
@@ -1657,13 +1572,13 @@ int smu_v11_0_get_dpm_ultimate_freq(struct smu_context *smu, enum smu_clk_type c
param = (clk_id & 0xffff) << 16;
if (max) {
-ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetMaxDpmFreq, param, max);
+ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GetMaxDpmFreq, param, max);
if (ret)
goto failed;
}
if (min) {
-ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetMinDpmFreq, param, min);
+ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GetMinDpmFreq, param, min);
if (ret)
goto failed;
}
@@ -1695,7 +1610,7 @@ int smu_v11_0_set_soft_freq_limited_range(struct smu_context *smu,
if (max > 0) {
param = (uint32_t)((clk_id << 16) | (max & 0xffff));
-ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxByFreq,
+ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxByFreq,
param, NULL);
if (ret)
goto out;
@@ -1703,7 +1618,7 @@ int smu_v11_0_set_soft_freq_limited_range(struct smu_context *smu,
if (min > 0) {
param = (uint32_t)((clk_id << 16) | (min & 0xffff));
-ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMinByFreq,
+ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMinByFreq,
param, NULL);
if (ret)
goto out;
@@ -1738,7 +1653,7 @@ int smu_v11_0_set_hard_freq_limited_range(struct smu_context *smu,
if (max > 0) {
param = (uint32_t)((clk_id << 16) | (max & 0xffff));
-ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMaxByFreq,
+ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetHardMaxByFreq,
param, NULL);
if (ret)
return ret;
@@ -1746,7 +1661,7 @@ int smu_v11_0_set_hard_freq_limited_range(struct smu_context *smu,
if (min > 0) {
param = (uint32_t)((clk_id << 16) | (min & 0xffff));
-ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinByFreq,
+ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinByFreq,
param, NULL);
if (ret)
return ret;
@@ -1867,7 +1782,7 @@ int smu_v11_0_set_power_source(struct smu_context *smu,
if (pwr_source < 0)
return -EINVAL;
-return smu_send_smc_msg_with_param(smu,
+return smu_cmn_send_smc_msg_with_param(smu,
SMU_MSG_NotifyPowerSource,
pwr_source,
NULL);
@@ -1895,7 +1810,7 @@ int smu_v11_0_get_dpm_freq_by_index(struct smu_context *smu,
param = (uint32_t)(((clk_id & 0xffff) << 16) | (level & 0xffff));
-ret = smu_send_smc_msg_with_param(smu,
+ret = smu_cmn_send_smc_msg_with_param(smu,
SMU_MSG_GetDpmFreqByIndex,
param,
value);
...
...@@ -53,88 +53,6 @@ ...@@ -53,88 +53,6 @@
#define smnMP1_FIRMWARE_FLAGS 0x3010024 #define smnMP1_FIRMWARE_FLAGS 0x3010024
int smu_v12_0_send_msg_without_waiting(struct smu_context *smu,
uint16_t msg)
{
struct amdgpu_device *adev = smu->adev;
WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_66, msg);
return 0;
}
static int smu_v12_0_read_arg(struct smu_context *smu, uint32_t *arg)
{
struct amdgpu_device *adev = smu->adev;
*arg = RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82);
return 0;
}
int smu_v12_0_wait_for_response(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
uint32_t cur_value, i;
for (i = 0; i < adev->usec_timeout; i++) {
cur_value = RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90);
if ((cur_value & MP1_C2PMSG_90__CONTENT_MASK) != 0)
return cur_value == 0x1 ? 0 : -EIO;
udelay(1);
}
/* timeout means wrong logic */
return -ETIME;
}
int
smu_v12_0_send_msg_with_param(struct smu_context *smu,
enum smu_message_type msg,
uint32_t param,
uint32_t *read_arg)
{
struct amdgpu_device *adev = smu->adev;
int ret = 0, index = 0;
index = smu_cmn_to_asic_specific_index(smu,
CMN2ASIC_MAPPING_MSG,
msg);
if (index < 0)
return index;
mutex_lock(&smu->message_lock);
ret = smu_v12_0_wait_for_response(smu);
if (ret) {
dev_err(adev->dev, "Msg issuing pre-check failed and "
"SMU may be not in the right state!\n");
goto out;
}
WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90, 0);
WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82, param);
smu_v12_0_send_msg_without_waiting(smu, (uint16_t)index);
ret = smu_v12_0_wait_for_response(smu);
if (ret) {
dev_err(adev->dev, "Failed to send message 0x%x, response 0x%x param 0x%x\n",
index, ret, param);
goto out;
}
if (read_arg) {
ret = smu_v12_0_read_arg(smu, read_arg);
if (ret) {
dev_err(adev->dev, "Failed to read message arg 0x%x, response 0x%x param 0x%x\n",
index, ret, param);
goto out;
}
}
out:
mutex_unlock(&smu->message_lock);
return ret;
}
int smu_v12_0_check_fw_status(struct smu_context *smu) int smu_v12_0_check_fw_status(struct smu_context *smu)
{ {
struct amdgpu_device *adev = smu->adev; struct amdgpu_device *adev = smu->adev;
...@@ -190,9 +108,9 @@ int smu_v12_0_powergate_sdma(struct smu_context *smu, bool gate) ...@@ -190,9 +108,9 @@ int smu_v12_0_powergate_sdma(struct smu_context *smu, bool gate)
return 0; return 0;
if (gate) if (gate)
return smu_send_smc_msg(smu, SMU_MSG_PowerDownSdma, NULL); return smu_cmn_send_smc_msg(smu, SMU_MSG_PowerDownSdma, NULL);
else else
return smu_send_smc_msg(smu, SMU_MSG_PowerUpSdma, NULL); return smu_cmn_send_smc_msg(smu, SMU_MSG_PowerUpSdma, NULL);
} }
int smu_v12_0_set_gfx_cgpg(struct smu_context *smu, bool enable) int smu_v12_0_set_gfx_cgpg(struct smu_context *smu, bool enable)
...@@ -200,7 +118,7 @@ int smu_v12_0_set_gfx_cgpg(struct smu_context *smu, bool enable) ...@@ -200,7 +118,7 @@ int smu_v12_0_set_gfx_cgpg(struct smu_context *smu, bool enable)
if (!(smu->adev->pg_flags & AMD_PG_SUPPORT_GFX_PG)) if (!(smu->adev->pg_flags & AMD_PG_SUPPORT_GFX_PG))
return 0; return 0;
return smu_v12_0_send_msg_with_param(smu, return smu_cmn_send_smc_msg_with_param(smu,
SMU_MSG_SetGfxCGPG, SMU_MSG_SetGfxCGPG,
enable ? 1 : 0, enable ? 1 : 0,
NULL); NULL);
...@@ -236,10 +154,10 @@ int smu_v12_0_gfx_off_control(struct smu_context *smu, bool enable) ...@@ -236,10 +154,10 @@ int smu_v12_0_gfx_off_control(struct smu_context *smu, bool enable)
int ret = 0, timeout = 500; int ret = 0, timeout = 500;
if (enable) { if (enable) {
ret = smu_send_smc_msg(smu, SMU_MSG_AllowGfxOff, NULL); ret = smu_cmn_send_smc_msg(smu, SMU_MSG_AllowGfxOff, NULL);
} else { } else {
ret = smu_send_smc_msg(smu, SMU_MSG_DisallowGfxOff, NULL); ret = smu_cmn_send_smc_msg(smu, SMU_MSG_DisallowGfxOff, NULL);
/* confirm gfx is back to "on" state, timeout is 0.5 second */ /* confirm gfx is back to "on" state, timeout is 0.5 second */
while (!(smu_v12_0_get_gfxoff_status(smu) == 2)) { while (!(smu_v12_0_get_gfxoff_status(smu) == 2)) {
...@@ -279,7 +197,7 @@ int smu_v12_0_set_default_dpm_tables(struct smu_context *smu) ...@@ -279,7 +197,7 @@ int smu_v12_0_set_default_dpm_tables(struct smu_context *smu)
} }
int smu_v12_0_mode2_reset(struct smu_context *smu){ int smu_v12_0_mode2_reset(struct smu_context *smu){
return smu_v12_0_send_msg_with_param(smu, SMU_MSG_GfxDeviceDriverReset, SMU_RESET_MODE_2, NULL); return smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GfxDeviceDriverReset, SMU_RESET_MODE_2, NULL);
} }
int smu_v12_0_set_soft_freq_limited_range(struct smu_context *smu, enum smu_clk_type clk_type, int smu_v12_0_set_soft_freq_limited_range(struct smu_context *smu, enum smu_clk_type clk_type,
...@@ -293,39 +211,39 @@ int smu_v12_0_set_soft_freq_limited_range(struct smu_context *smu, enum smu_clk_ ...@@ -293,39 +211,39 @@ int smu_v12_0_set_soft_freq_limited_range(struct smu_context *smu, enum smu_clk_
switch (clk_type) { switch (clk_type) {
case SMU_GFXCLK: case SMU_GFXCLK:
case SMU_SCLK: case SMU_SCLK:
ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinGfxClk, min, NULL); ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinGfxClk, min, NULL);
if (ret) if (ret)
return ret; return ret;
ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxGfxClk, max, NULL); ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxGfxClk, max, NULL);
if (ret) if (ret)
return ret; return ret;
break; break;
case SMU_FCLK: case SMU_FCLK:
case SMU_MCLK: case SMU_MCLK:
ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinFclkByFreq, min, NULL); ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinFclkByFreq, min, NULL);
if (ret) if (ret)
return ret; return ret;
ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxFclkByFreq, max, NULL); ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxFclkByFreq, max, NULL);
if (ret) if (ret)
return ret; return ret;
break; break;
case SMU_SOCCLK: case SMU_SOCCLK:
ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinSocclkByFreq, min, NULL); ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinSocclkByFreq, min, NULL);
if (ret) if (ret)
return ret; return ret;
ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxSocclkByFreq, max, NULL); ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxSocclkByFreq, max, NULL);
if (ret) if (ret)
return ret; return ret;
break; break;
case SMU_VCLK: case SMU_VCLK:
ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinVcn, min, NULL); ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinVcn, min, NULL);
if (ret) if (ret)
return ret; return ret;
ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxVcn, max, NULL); ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxVcn, max, NULL);
if (ret) if (ret)
return ret; return ret;
break; break;
...@@ -342,12 +260,12 @@ int smu_v12_0_set_driver_table_location(struct smu_context *smu) ...@@ -342,12 +260,12 @@ int smu_v12_0_set_driver_table_location(struct smu_context *smu)
int ret = 0; int ret = 0;
if (driver_table->mc_address) { if (driver_table->mc_address) {
ret = smu_send_smc_msg_with_param(smu, ret = smu_cmn_send_smc_msg_with_param(smu,
SMU_MSG_SetDriverDramAddrHigh, SMU_MSG_SetDriverDramAddrHigh,
upper_32_bits(driver_table->mc_address), upper_32_bits(driver_table->mc_address),
NULL); NULL);
if (!ret) if (!ret)
ret = smu_send_smc_msg_with_param(smu, ret = smu_cmn_send_smc_msg_with_param(smu,
SMU_MSG_SetDriverDramAddrLow, SMU_MSG_SetDriverDramAddrLow,
lower_32_bits(driver_table->mc_address), lower_32_bits(driver_table->mc_address),
NULL); NULL);
......
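For readers following the conversion above: every caller now goes through the common helpers (smu_cmn_send_smc_msg / smu_cmn_send_smc_msg_with_param) instead of per-ASIC variants such as smu_v12_0_send_msg_with_param. The sketch below is illustrative only; it follows the register sequence visible in the removed smu_v12_0 path (index lookup, message_lock, C2PMSG_66/82/90 accesses) rather than the exact body of smu_cmn.c, and the helper name smu_cmn_wait_for_response is assumed here for the response-polling step.

	/*
	 * Illustrative sketch of the shared message-issuing flow, assuming
	 * the same MP1 mailbox registers used by the removed smu_v12_0 code.
	 */
	static int smu_cmn_send_smc_msg_with_param_sketch(struct smu_context *smu,
							  enum smu_message_type msg,
							  uint32_t param,
							  uint32_t *read_arg)
	{
		struct amdgpu_device *adev = smu->adev;
		int ret, index;

		/* Translate the generic message into the ASIC-specific index. */
		index = smu_cmn_to_asic_specific_index(smu,
						       CMN2ASIC_MAPPING_MSG,
						       msg);
		if (index < 0)
			return index;

		mutex_lock(&smu->message_lock);

		/* Pre-check: the previous message must have completed. */
		ret = smu_cmn_wait_for_response(smu);	/* assumed helper name */
		if (ret)
			goto out;

		/* Clear the response register, program the argument, fire the message. */
		WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90, 0);
		WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82, param);
		WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_66, (uint16_t)index);

		ret = smu_cmn_wait_for_response(smu);
		if (ret)
			goto out;

		/* Optionally hand the SMU's reply argument back to the caller. */
		if (read_arg)
			*read_arg = RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82);

	out:
		mutex_unlock(&smu->message_lock);
		return ret;
	}

Callers that need no argument, e.g. smu_cmn_send_smc_msg(smu, SMU_MSG_ExitBaco, NULL), presumably route through the same path with a parameter of 0.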