Commit 6c339f37 authored by Evan Quan, committed by Alex Deucher

drm/amd/powerplay: unify swSMU index to asic specific index mapping

With this in place, the redundant per-ASIC index lookup code can be dropped.
Signed-off-by: Evan Quan <evan.quan@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent 22f2447c
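The unification replaces the per-ASIC *_get_smu_*_index() callbacks with static mapping tables that each ppt backend registers on the smu_context, plus one shared lookup helper. Below is a minimal, standalone sketch of that table-driven pattern; the names (common_msg, msg_mapping, to_asic_index, asic_a_msg_map) are illustrative only and are not the kernel API surface shown in the hunks that follow.

#include <stdio.h>

enum common_msg { MSG_TEST, MSG_GET_VERSION, MSG_COUNT };

struct msg_mapping {
	int valid_mapping;	/* table entry populated for this ASIC */
	int map_to;		/* ASIC-specific message index */
	int valid_in_vf;	/* allowed when running as an SR-IOV VF */
};

/* Per-ASIC table indexed by the common enum, as a backend would register. */
static const struct msg_mapping asic_a_msg_map[MSG_COUNT] = {
	[MSG_TEST]        = { 1, 0x01, 0 },
	[MSG_GET_VERSION] = { 1, 0x02, 1 },
};

/* One shared lookup replaces the per-ASIC get_index() callbacks. */
static int to_asic_index(const struct msg_mapping *map, int count,
			 int index, int is_vf)
{
	if (!map || index >= count)
		return -1;	/* no table registered or out-of-range index */
	if (!map[index].valid_mapping)
		return -1;	/* message not wired up on this ASIC */
	if (is_vf && !map[index].valid_in_vf)
		return -2;	/* message not permitted in a VF */
	return map[index].map_to;
}

int main(void)
{
	printf("MSG_GET_VERSION -> 0x%02x\n",
	       to_asic_index(asic_a_msg_map, MSG_COUNT, MSG_GET_VERSION, 0));
	return 0;
}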
@@ -35,7 +35,9 @@ AMD_POWERPLAY = $(addsuffix /Makefile,$(addprefix $(FULL_AMD_PATH)/powerplay/,$(
 include $(AMD_POWERPLAY)
-POWER_MGR = amd_powerplay.o amdgpu_smu.o smu_v11_0.o smu_v12_0.o arcturus_ppt.o navi10_ppt.o renoir_ppt.o sienna_cichlid_ppt.o
+POWER_MGR = amd_powerplay.o amdgpu_smu.o smu_v11_0.o \
+	    smu_v12_0.o arcturus_ppt.o navi10_ppt.o \
+	    renoir_ppt.o sienna_cichlid_ppt.o smu_cmn.o
 AMD_PP_POWER = $(addprefix $(AMD_PP_PATH)/,$(POWER_MGR))
...
@@ -34,6 +34,7 @@
 #include "sienna_cichlid_ppt.h"
 #include "renoir_ppt.h"
 #include "amd_pcie.h"
+#include "smu_cmn.h"
 /*
  * DO NOT use these for err/warn/info/debug messages.
@@ -94,7 +95,9 @@ size_t smu_sys_get_pp_feature_mask(struct smu_context *smu, char *buf)
 			feature_mask[1], feature_mask[0]);
 	for (i = 0; i < SMU_FEATURE_COUNT; i++) {
-		feature_index = smu_feature_get_index(smu, i);
+		feature_index = smu_cmn_to_asic_specific_index(smu,
+							       CMN2ASIC_MAPPING_FEATURE,
+							       i);
 		if (feature_index < 0)
 			continue;
 		sort_feature[feature_index] = i;
@@ -405,7 +408,9 @@ int smu_update_table(struct smu_context *smu, enum smu_table_id table_index, int
 	struct smu_table_context *smu_table = &smu->smu_table;
 	struct amdgpu_device *adev = smu->adev;
 	struct smu_table *table = &smu_table->driver_table;
-	int table_id = smu_table_get_index(smu, table_index);
+	int table_id = smu_cmn_to_asic_specific_index(smu,
+						      CMN2ASIC_MAPPING_TABLE,
+						      table_index);
 	uint32_t table_size;
 	int ret = 0;
 	if (!table_data || table_id >= SMU_TABLE_COUNT || table_id < 0)
@@ -546,7 +551,9 @@ int smu_feature_is_enabled(struct smu_context *smu, enum smu_feature_mask mask)
 	if (smu->is_apu)
 		return 1;
-	feature_id = smu_feature_get_index(smu, mask);
+	feature_id = smu_cmn_to_asic_specific_index(smu,
+						    CMN2ASIC_MAPPING_FEATURE,
+						    mask);
 	if (feature_id < 0)
 		return 0;
@@ -565,7 +572,9 @@ int smu_feature_set_enabled(struct smu_context *smu, enum smu_feature_mask mask,
 	struct smu_feature *feature = &smu->smu_feature;
 	int feature_id;
-	feature_id = smu_feature_get_index(smu, mask);
+	feature_id = smu_cmn_to_asic_specific_index(smu,
+						    CMN2ASIC_MAPPING_FEATURE,
+						    mask);
 	if (feature_id < 0)
 		return -EINVAL;
@@ -582,7 +591,9 @@ int smu_feature_is_supported(struct smu_context *smu, enum smu_feature_mask mask
 	int feature_id;
 	int ret = 0;
-	feature_id = smu_feature_get_index(smu, mask);
+	feature_id = smu_cmn_to_asic_specific_index(smu,
+						    CMN2ASIC_MAPPING_FEATURE,
+						    mask);
 	if (feature_id < 0)
 		return 0;
@@ -1314,7 +1325,9 @@ static int smu_disable_dpms(struct smu_context *smu)
 	 */
 	if (use_baco && smu_feature_is_enabled(smu, SMU_FEATURE_BACO_BIT)) {
 		features_to_disable = U64_MAX &
-			~(1ULL << smu_feature_get_index(smu, SMU_FEATURE_BACO_BIT));
+			~(1ULL << smu_cmn_to_asic_specific_index(smu,
+								 CMN2ASIC_MAPPING_FEATURE,
+								 SMU_FEATURE_BACO_BIT));
 		ret = smu_feature_update_enable_state(smu,
 						      features_to_disable,
 						      0);
@@ -1882,7 +1895,9 @@ int smu_set_mp1_state(struct smu_context *smu,
 	}
 	/* some asics may not support those messages */
-	if (smu_msg_get_index(smu, msg) < 0) {
+	if (smu_cmn_to_asic_specific_index(smu,
+					   CMN2ASIC_MAPPING_MSG,
+					   msg) < 0) {
 		mutex_unlock(&smu->mutex);
 		return 0;
 	}
...
@@ -44,6 +44,7 @@
 #include <linux/i2c.h>
 #include <linux/pci.h>
 #include "amdgpu_ras.h"
+#include "smu_cmn.h"
 /*
  * DO NOT use these for err/warn/info/debug messages.
@@ -57,8 +58,6 @@
 #define to_amdgpu_device(x) (container_of(x, struct amdgpu_device, pm.smu_i2c))
-#define MSG_MAP(msg, index, valid_in_vf) \
-	[SMU_MSG_##msg] = {1, (index), (valid_in_vf)}
 #define ARCTURUS_FEA_MAP(smu_feature, arcturus_feature) \
 	[smu_feature] = {1, (arcturus_feature)}
@@ -79,7 +78,7 @@
 /* possible frequency drift (1Mhz) */
 #define EPSILON 1
-static struct smu_11_0_msg_mapping arcturus_message_map[SMU_MSG_MAX_COUNT] = {
+static const struct cmn2asic_msg_mapping arcturus_message_map[SMU_MSG_MAX_COUNT] = {
 	MSG_MAP(TestMessage, PPSMC_MSG_TestMessage, 0),
 	MSG_MAP(GetSmuVersion, PPSMC_MSG_GetSmuVersion, 1),
 	MSG_MAP(GetDriverIfVersion, PPSMC_MSG_GetDriverIfVersion, 1),
@@ -142,7 +141,7 @@ static struct smu_11_0_msg_mapping arcturus_message_map[SMU_MSG_MAX_COUNT] = {
 	MSG_MAP(ReadSerialNumBottom32, PPSMC_MSG_ReadSerialNumBottom32, 1),
 };
-static struct smu_11_0_cmn2aisc_mapping arcturus_clk_map[SMU_CLK_COUNT] = {
+static const struct cmn2asic_mapping arcturus_clk_map[SMU_CLK_COUNT] = {
 	CLK_MAP(GFXCLK, PPCLK_GFXCLK),
 	CLK_MAP(SCLK, PPCLK_GFXCLK),
 	CLK_MAP(SOCCLK, PPCLK_SOCCLK),
@@ -153,7 +152,7 @@ static struct smu_11_0_cmn2aisc_mapping arcturus_clk_map[SMU_CLK_COUNT] = {
 	CLK_MAP(VCLK, PPCLK_VCLK),
 };
-static struct smu_11_0_cmn2aisc_mapping arcturus_feature_mask_map[SMU_FEATURE_COUNT] = {
+static const struct cmn2asic_mapping arcturus_feature_mask_map[SMU_FEATURE_COUNT] = {
 	FEA_MAP(DPM_PREFETCHER),
 	FEA_MAP(DPM_GFXCLK),
 	FEA_MAP(DPM_UCLK),
@@ -182,7 +181,7 @@ static struct smu_11_0_cmn2aisc_mapping arcturus_feature_mask_map[SMU_FEATURE_CO
 	FEA_MAP(TEMP_DEPENDENT_VMIN),
 };
-static struct smu_11_0_cmn2aisc_mapping arcturus_table_map[SMU_TABLE_COUNT] = {
+static const struct cmn2asic_mapping arcturus_table_map[SMU_TABLE_COUNT] = {
 	TAB_MAP(PPTABLE),
 	TAB_MAP(AVFS),
 	TAB_MAP(AVFS_PSM_DEBUG),
@@ -195,12 +194,12 @@ static struct smu_11_0_cmn2aisc_mapping arcturus_table_map[SMU_TABLE_COUNT] = {
 	TAB_MAP(ACTIVITY_MONITOR_COEFF),
 };
-static struct smu_11_0_cmn2aisc_mapping arcturus_pwr_src_map[SMU_POWER_SOURCE_COUNT] = {
+static const struct cmn2asic_mapping arcturus_pwr_src_map[SMU_POWER_SOURCE_COUNT] = {
 	PWR_MAP(AC),
 	PWR_MAP(DC),
 };
-static struct smu_11_0_cmn2aisc_mapping arcturus_workload_map[PP_SMC_POWER_PROFILE_COUNT] = {
+static const struct cmn2asic_mapping arcturus_workload_map[PP_SMC_POWER_PROFILE_COUNT] = {
 	WORKLOAD_MAP(PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT, WORKLOAD_PPLIB_DEFAULT_BIT),
 	WORKLOAD_MAP(PP_SMC_POWER_PROFILE_POWERSAVING, WORKLOAD_PPLIB_POWER_SAVING_BIT),
 	WORKLOAD_MAP(PP_SMC_POWER_PROFILE_VIDEO, WORKLOAD_PPLIB_VIDEO_BIT),
@@ -210,6 +209,7 @@ static struct smu_11_0_cmn2aisc_mapping arcturus_workload_map[PP_SMC_POWER_PROFI
 static int arcturus_get_smu_msg_index(struct smu_context *smc, uint32_t index)
 {
+#if 0
 	struct smu_11_0_msg_mapping mapping;
 	if (index >= SMU_MSG_MAX_COUNT)
@@ -223,10 +223,13 @@ static int arcturus_get_smu_msg_index(struct smu_context *smc, uint32_t index)
 		return -EACCES;
 	return mapping.map_to;
+#endif
+	return 0;
 }
 static int arcturus_get_smu_clk_index(struct smu_context *smc, uint32_t index)
 {
+#if 0
 	struct smu_11_0_cmn2aisc_mapping mapping;
 	if (index >= SMU_CLK_COUNT)
@@ -239,10 +242,13 @@ static int arcturus_get_smu_clk_index(struct smu_context *smc, uint32_t index)
 	}
 	return mapping.map_to;
+#endif
+	return 0;
 }
 static int arcturus_get_smu_feature_index(struct smu_context *smc, uint32_t index)
 {
+#if 0
 	struct smu_11_0_cmn2aisc_mapping mapping;
 	if (index >= SMU_FEATURE_COUNT)
@@ -254,10 +260,13 @@ static int arcturus_get_smu_feature_index(struct smu_context *smc, uint32_t inde
 	}
 	return mapping.map_to;
+#endif
+	return 0;
 }
 static int arcturus_get_smu_table_index(struct smu_context *smc, uint32_t index)
 {
+#if 0
 	struct smu_11_0_cmn2aisc_mapping mapping;
 	if (index >= SMU_TABLE_COUNT)
@@ -270,10 +279,13 @@ static int arcturus_get_smu_table_index(struct smu_context *smc, uint32_t index)
 	}
 	return mapping.map_to;
+#endif
+	return 0;
 }
 static int arcturus_get_pwr_src_index(struct smu_context *smc, uint32_t index)
 {
+#if 0
 	struct smu_11_0_cmn2aisc_mapping mapping;
 	if (index >= SMU_POWER_SOURCE_COUNT)
@@ -286,10 +298,13 @@ static int arcturus_get_pwr_src_index(struct smu_context *smc, uint32_t index)
 	}
 	return mapping.map_to;
+#endif
+	return 0;
 }
 static int arcturus_get_workload_type(struct smu_context *smu, enum PP_SMC_POWER_PROFILE profile)
 {
+#if 0
 	struct smu_11_0_cmn2aisc_mapping mapping;
 	if (profile > PP_SMC_POWER_PROFILE_CUSTOM)
@@ -300,6 +315,8 @@ static int arcturus_get_workload_type(struct smu_context *smu, enum PP_SMC_POWER
 		return -EINVAL;
 	return mapping.map_to;
+#endif
+	return 0;
 }
 static int arcturus_tables_init(struct smu_context *smu, struct smu_table *tables)
@@ -731,7 +748,9 @@ static int arcturus_get_current_clk_freq_by_table(struct smu_context *smu,
 	if (!value)
 		return -EINVAL;
-	clk_id = smu_clk_get_index(smu, clk_type);
+	clk_id = smu_cmn_to_asic_specific_index(smu,
+						CMN2ASIC_MAPPING_CLK,
+						clk_type);
 	if (clk_id < 0)
 		return -EINVAL;
@@ -1301,7 +1320,9 @@ static int arcturus_get_power_profile_mode(struct smu_context *smu,
 	 * Conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT
	 * Not all profile modes are supported on arcturus.
	 */
-		workload_type = smu_workload_get_type(smu, i);
+		workload_type = smu_cmn_to_asic_specific_index(smu,
+							       CMN2ASIC_MAPPING_WORKLOAD,
+							       i);
 		if (workload_type < 0)
 			continue;
@@ -1425,7 +1446,9 @@ static int arcturus_set_power_profile_mode(struct smu_context *smu,
 	 * Conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT
	 * Not all profile modes are supported on arcturus.
	 */
-	workload_type = smu_workload_get_type(smu, profile_mode);
+	workload_type = smu_cmn_to_asic_specific_index(smu,
+						       CMN2ASIC_MAPPING_WORKLOAD,
+						       profile_mode);
 	if (workload_type < 0) {
 		dev_err(smu->adev->dev, "Unsupported power profile mode %d on arcturus\n", profile_mode);
 		return -EINVAL;
@@ -2412,4 +2435,10 @@ static const struct pptable_funcs arcturus_ppt_funcs = {
 void arcturus_set_ppt_funcs(struct smu_context *smu)
 {
 	smu->ppt_funcs = &arcturus_ppt_funcs;
+	smu->message_map = arcturus_message_map;
+	smu->clock_map = arcturus_clk_map;
+	smu->feature_map = arcturus_feature_mask_map;
+	smu->table_map = arcturus_table_map;
+	smu->pwr_src_map = arcturus_pwr_src_map;
+	smu->workload_map = arcturus_workload_map;
 }
@@ -366,6 +366,17 @@ struct smu_umd_pstate_table {
 	struct pstates_clk_freq dclk_pstate;
 };
+struct cmn2asic_msg_mapping {
+	int valid_mapping;
+	int map_to;
+	int valid_in_vf;
+};
+struct cmn2asic_mapping {
+	int valid_mapping;
+	int map_to;
+};
 #define WORKLOAD_POLICY_MAX 7
 struct smu_context
 {
@@ -373,6 +384,12 @@ struct smu_context
 	struct amdgpu_irq_src irq_source;
 	const struct pptable_funcs *ppt_funcs;
+	const struct cmn2asic_msg_mapping *message_map;
+	const struct cmn2asic_mapping *clock_map;
+	const struct cmn2asic_mapping *feature_map;
+	const struct cmn2asic_mapping *table_map;
+	const struct cmn2asic_mapping *pwr_src_map;
+	const struct cmn2asic_mapping *workload_map;
 	struct mutex mutex;
 	struct mutex sensor_lock;
 	struct mutex metrics_lock;
@@ -604,6 +621,39 @@ typedef enum {
 	METRICS_CURR_FANSPEED,
 } MetricsMember_t;
+enum smu_cmn2asic_mapping_type {
+	CMN2ASIC_MAPPING_MSG,
+	CMN2ASIC_MAPPING_CLK,
+	CMN2ASIC_MAPPING_FEATURE,
+	CMN2ASIC_MAPPING_TABLE,
+	CMN2ASIC_MAPPING_PWR,
+	CMN2ASIC_MAPPING_WORKLOAD,
+};
+#define MSG_MAP(msg, index, valid_in_vf) \
+	[SMU_MSG_##msg] = {1, (index), (valid_in_vf)}
+#define CLK_MAP(clk, index) \
+	[SMU_##clk] = {1, (index)}
+#define FEA_MAP(fea) \
+	[SMU_FEATURE_##fea##_BIT] = {1, FEATURE_##fea##_BIT}
+#define TAB_MAP(tab) \
+	[SMU_TABLE_##tab] = {1, TABLE_##tab}
+#define TAB_MAP_VALID(tab) \
+	[SMU_TABLE_##tab] = {1, TABLE_##tab}
+#define TAB_MAP_INVALID(tab) \
+	[SMU_TABLE_##tab] = {0, TABLE_##tab}
+#define PWR_MAP(tab) \
+	[SMU_POWER_SOURCE_##tab] = {1, POWER_SOURCE_##tab}
+#define WORKLOAD_MAP(profile, workload) \
+	[profile] = {1, (workload)}
 int smu_load_microcode(struct smu_context *smu);
 int smu_check_fw_status(struct smu_context *smu);
...
@@ -52,21 +52,6 @@
 #define MAX_DPM_LEVELS 16
 #define MAX_PCIE_CONF 2
-#define CLK_MAP(clk, index) \
-	[SMU_##clk] = {1, (index)}
-#define FEA_MAP(fea) \
-	[SMU_FEATURE_##fea##_BIT] = {1, FEATURE_##fea##_BIT}
-#define TAB_MAP(tab) \
-	[SMU_TABLE_##tab] = {1, TABLE_##tab}
-#define PWR_MAP(tab) \
-	[SMU_POWER_SOURCE_##tab] = {1, POWER_SOURCE_##tab}
-#define WORKLOAD_MAP(profile, workload) \
-	[profile] = {1, (workload)}
 #define CTF_OFFSET_EDGE 5
 #define CTF_OFFSET_HOTSPOT 5
 #define CTF_OFFSET_MEM 5
...
@@ -42,6 +42,7 @@
 #include "thm/thm_11_0_2_sh_mask.h"
 #include "asic_reg/mp/mp_11_0_sh_mask.h"
+#include "smu_cmn.h"
 /*
  * DO NOT use these for err/warn/info/debug messages.
@@ -64,10 +65,7 @@
 		   FEATURE_MASK(FEATURE_DPM_LINK_BIT) | \
 		   FEATURE_MASK(FEATURE_DPM_DCEFCLK_BIT))
-#define MSG_MAP(msg, index, valid_in_vf) \
-	[SMU_MSG_##msg] = {1, (index), (valid_in_vf)}
-static struct smu_11_0_msg_mapping navi10_message_map[SMU_MSG_MAX_COUNT] = {
+static struct cmn2asic_msg_mapping navi10_message_map[SMU_MSG_MAX_COUNT] = {
 	MSG_MAP(TestMessage, PPSMC_MSG_TestMessage, 1),
 	MSG_MAP(GetSmuVersion, PPSMC_MSG_GetSmuVersion, 1),
 	MSG_MAP(GetDriverIfVersion, PPSMC_MSG_GetDriverIfVersion, 1),
@@ -138,7 +136,7 @@ static struct smu_11_0_msg_mapping navi10_message_map[SMU_MSG_MAX_COUNT] = {
 	MSG_MAP(GetVoltageByDpmOverdrive, PPSMC_MSG_GetVoltageByDpmOverdrive, 0),
 };
-static struct smu_11_0_cmn2aisc_mapping navi10_clk_map[SMU_CLK_COUNT] = {
+static struct cmn2asic_mapping navi10_clk_map[SMU_CLK_COUNT] = {
 	CLK_MAP(GFXCLK, PPCLK_GFXCLK),
 	CLK_MAP(SCLK, PPCLK_GFXCLK),
 	CLK_MAP(SOCCLK, PPCLK_SOCCLK),
@@ -153,7 +151,7 @@ static struct smu_11_0_cmn2aisc_mapping navi10_clk_map[SMU_CLK_COUNT] = {
 	CLK_MAP(PHYCLK, PPCLK_PHYCLK),
 };
-static struct smu_11_0_cmn2aisc_mapping navi10_feature_mask_map[SMU_FEATURE_COUNT] = {
+static struct cmn2asic_mapping navi10_feature_mask_map[SMU_FEATURE_COUNT] = {
 	FEA_MAP(DPM_PREFETCHER),
 	FEA_MAP(DPM_GFXCLK),
 	FEA_MAP(DPM_GFX_PACE),
@@ -199,7 +197,7 @@ static struct smu_11_0_cmn2aisc_mapping navi10_feature_mask_map[SMU_FEATURE_COUN
 	FEA_MAP(APCC_DFLL),
 };
-static struct smu_11_0_cmn2aisc_mapping navi10_table_map[SMU_TABLE_COUNT] = {
+static struct cmn2asic_mapping navi10_table_map[SMU_TABLE_COUNT] = {
 	TAB_MAP(PPTABLE),
 	TAB_MAP(WATERMARKS),
 	TAB_MAP(AVFS),
@@ -214,12 +212,12 @@ static struct smu_11_0_cmn2aisc_mapping navi10_table_map[SMU_TABLE_COUNT] = {
 	TAB_MAP(PACE),
 };
-static struct smu_11_0_cmn2aisc_mapping navi10_pwr_src_map[SMU_POWER_SOURCE_COUNT] = {
+static struct cmn2asic_mapping navi10_pwr_src_map[SMU_POWER_SOURCE_COUNT] = {
 	PWR_MAP(AC),
 	PWR_MAP(DC),
 };
-static struct smu_11_0_cmn2aisc_mapping navi10_workload_map[PP_SMC_POWER_PROFILE_COUNT] = {
+static struct cmn2asic_mapping navi10_workload_map[PP_SMC_POWER_PROFILE_COUNT] = {
 	WORKLOAD_MAP(PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT, WORKLOAD_PPLIB_DEFAULT_BIT),
 	WORKLOAD_MAP(PP_SMC_POWER_PROFILE_FULLSCREEN3D, WORKLOAD_PPLIB_FULL_SCREEN_3D_BIT),
 	WORKLOAD_MAP(PP_SMC_POWER_PROFILE_POWERSAVING, WORKLOAD_PPLIB_POWER_SAVING_BIT),
@@ -231,6 +229,7 @@ static struct smu_11_0_cmn2aisc_mapping navi10_workload_map[PP_SMC_POWER_PROFILE
 static int navi10_get_smu_msg_index(struct smu_context *smc, uint32_t index)
 {
+#if 0
 	struct smu_11_0_msg_mapping mapping;
 	if (index >= SMU_MSG_MAX_COUNT)
@@ -245,10 +244,13 @@ static int navi10_get_smu_msg_index(struct smu_context *smc, uint32_t index)
 		return -EACCES;
 	return mapping.map_to;
+#endif
+	return 0;
 }
 static int navi10_get_smu_clk_index(struct smu_context *smc, uint32_t index)
 {
+#if 0
 	struct smu_11_0_cmn2aisc_mapping mapping;
 	if (index >= SMU_CLK_COUNT)
@@ -260,10 +262,13 @@ static int navi10_get_smu_clk_index(struct smu_context *smc, uint32_t index)
 	}
 	return mapping.map_to;
+#endif
+	return 0;
 }
 static int navi10_get_smu_feature_index(struct smu_context *smc, uint32_t index)
 {
+#if 0
 	struct smu_11_0_cmn2aisc_mapping mapping;
 	if (index >= SMU_FEATURE_COUNT)
@@ -275,10 +280,13 @@ static int navi10_get_smu_feature_index(struct smu_context *smc, uint32_t index)
 	}
 	return mapping.map_to;
+#endif
+	return 0;
 }
 static int navi10_get_smu_table_index(struct smu_context *smc, uint32_t index)
 {
+#if 0
 	struct smu_11_0_cmn2aisc_mapping mapping;
 	if (index >= SMU_TABLE_COUNT)
@@ -290,10 +298,13 @@ static int navi10_get_smu_table_index(struct smu_context *smc, uint32_t index)
 	}
 	return mapping.map_to;
+#endif
+	return 0;
 }
 static int navi10_get_pwr_src_index(struct smu_context *smc, uint32_t index)
 {
+#if 0
 	struct smu_11_0_cmn2aisc_mapping mapping;
 	if (index >= SMU_POWER_SOURCE_COUNT)
@@ -305,11 +316,14 @@ static int navi10_get_pwr_src_index(struct smu_context *smc, uint32_t index)
 	}
 	return mapping.map_to;
+#endif
+	return 0;
 }
 static int navi10_get_workload_type(struct smu_context *smu, enum PP_SMC_POWER_PROFILE profile)
 {
+#if 0
 	struct smu_11_0_cmn2aisc_mapping mapping;
 	if (profile > PP_SMC_POWER_PROFILE_CUSTOM)
@@ -321,6 +335,8 @@ static int navi10_get_workload_type(struct smu_context *smu, enum PP_SMC_POWER_P
 	}
 	return mapping.map_to;
+#endif
+	return 0;
 }
 static bool is_asic_secure(struct smu_context *smu)
@@ -918,7 +934,9 @@ static int navi10_get_current_clk_freq_by_table(struct smu_context *smu,
 	MetricsMember_t member_type;
 	int clk_id = 0;
-	clk_id = smu_clk_get_index(smu, clk_type);
+	clk_id = smu_cmn_to_asic_specific_index(smu,
+						CMN2ASIC_MAPPING_CLK,
+						clk_type);
 	if (clk_id < 0)
 		return clk_id;
@@ -956,7 +974,9 @@ static bool navi10_is_support_fine_grained_dpm(struct smu_context *smu, enum smu
 	DpmDescriptor_t *dpm_desc = NULL;
 	uint32_t clk_index = 0;
-	clk_index = smu_clk_get_index(smu, clk_type);
+	clk_index = smu_cmn_to_asic_specific_index(smu,
+						   CMN2ASIC_MAPPING_CLK,
+						   clk_type);
 	dpm_desc = &pptable->DpmDescriptor[clk_index];
 	/* 0 - Fine grained DPM, 1 - Discrete DPM */
@@ -1484,7 +1504,9 @@ static int navi10_get_power_profile_mode(struct smu_context *smu, char *buf)
 	for (i = 0; i <= PP_SMC_POWER_PROFILE_CUSTOM; i++) {
 		/* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */
-		workload_type = smu_workload_get_type(smu, i);
+		workload_type = smu_cmn_to_asic_specific_index(smu,
+							       CMN2ASIC_MAPPING_WORKLOAD,
+							       i);
 		if (workload_type < 0)
 			return -EINVAL;
@@ -1613,7 +1635,9 @@ static int navi10_set_power_profile_mode(struct smu_context *smu, long *input, u
 	}
 	/* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */
-	workload_type = smu_workload_get_type(smu, smu->power_profile_mode);
+	workload_type = smu_cmn_to_asic_specific_index(smu,
+						       CMN2ASIC_MAPPING_WORKLOAD,
+						       smu->power_profile_mode);
 	if (workload_type < 0)
 		return -EINVAL;
 	smu_send_smc_msg_with_param(smu, SMU_MSG_SetWorkloadMask,
@@ -2427,4 +2451,10 @@ static const struct pptable_funcs navi10_ppt_funcs = {
 void navi10_set_ppt_funcs(struct smu_context *smu)
 {
 	smu->ppt_funcs = &navi10_ppt_funcs;
+	smu->message_map = navi10_message_map;
+	smu->clock_map = navi10_clk_map;
+	smu->feature_map = navi10_feature_mask_map;
+	smu->table_map = navi10_table_map;
+	smu->pwr_src_map = navi10_pwr_src_map;
+	smu->workload_map = navi10_workload_map;
 }
...
/*
* Copyright 2020 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include "amdgpu.h"
#include "amdgpu_smu.h"
#include "smu_cmn.h"
/*
* DO NOT use these for err/warn/info/debug messages.
* Use dev_err, dev_warn, dev_info and dev_dbg instead.
* They are more MGPU friendly.
*/
#undef pr_err
#undef pr_warn
#undef pr_info
#undef pr_debug
int smu_cmn_to_asic_specific_index(struct smu_context *smu,
				   enum smu_cmn2asic_mapping_type type,
				   uint32_t index)
{
	struct cmn2asic_msg_mapping msg_mapping;
	struct cmn2asic_mapping mapping;

	switch (type) {
	case CMN2ASIC_MAPPING_MSG:
		if (index > SMU_MSG_MAX_COUNT ||
		    !smu->message_map)
			return -EINVAL;
		msg_mapping = smu->message_map[index];
		if (!msg_mapping.valid_mapping)
			return -EINVAL;
		if (amdgpu_sriov_vf(smu->adev) &&
		    !msg_mapping.valid_in_vf)
			return -EACCES;
		return msg_mapping.map_to;
	case CMN2ASIC_MAPPING_CLK:
		if (index > SMU_CLK_COUNT ||
		    !smu->clock_map)
			return -EINVAL;
		mapping = smu->clock_map[index];
		if (!mapping.valid_mapping)
			return -EINVAL;
		return mapping.map_to;
	case CMN2ASIC_MAPPING_FEATURE:
		if (index > SMU_FEATURE_COUNT ||
		    !smu->feature_map)
			return -EINVAL;
		mapping = smu->feature_map[index];
		if (!mapping.valid_mapping)
			return -EINVAL;
		return mapping.map_to;
	case CMN2ASIC_MAPPING_TABLE:
		if (index > SMU_TABLE_COUNT ||
		    !smu->table_map)
			return -EINVAL;
		mapping = smu->table_map[index];
		if (!mapping.valid_mapping)
			return -EINVAL;
		return mapping.map_to;
	case CMN2ASIC_MAPPING_PWR:
		if (index > SMU_POWER_SOURCE_COUNT ||
		    !smu->pwr_src_map)
			return -EINVAL;
		mapping = smu->pwr_src_map[index];
		if (!mapping.valid_mapping)
			return -EINVAL;
		return mapping.map_to;
	case CMN2ASIC_MAPPING_WORKLOAD:
		if (index > PP_SMC_POWER_PROFILE_CUSTOM ||
		    !smu->workload_map)
			return -EINVAL;
		mapping = smu->workload_map[index];
		if (!mapping.valid_mapping)
			return -EINVAL;
		return mapping.map_to;
	default:
		return -EINVAL;
	}
}
/*
* Copyright 2020 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#ifndef __SMU_CMN_H__
#define __SMU_CMN_H__
#include "amdgpu_smu.h"
int smu_cmn_to_asic_specific_index(struct smu_context *smu,
				   enum smu_cmn2asic_mapping_type type,
				   uint32_t index);
#endif
@@ -37,6 +37,7 @@
 #include "soc15_common.h"
 #include "atom.h"
 #include "amdgpu_ras.h"
+#include "smu_cmn.h"
 #include "asic_reg/thm/thm_11_0_2_offset.h"
 #include "asic_reg/thm/thm_11_0_2_sh_mask.h"
@@ -111,7 +112,9 @@ smu_v11_0_send_msg_with_param(struct smu_context *smu,
 	struct amdgpu_device *adev = smu->adev;
 	int ret = 0, index = 0;
-	index = smu_msg_get_index(smu, msg);
+	index = smu_cmn_to_asic_specific_index(smu,
+					       CMN2ASIC_MAPPING_MSG,
+					       msg);
 	if (index < 0)
 		return index == -EACCES ? 0 : index;
@@ -947,11 +950,13 @@ smu_v11_0_get_max_sustainable_clock(struct smu_context *smu, uint32_t *clock,
 	int ret = 0;
 	int clk_id;
-	if ((smu_msg_get_index(smu, SMU_MSG_GetDcModeMaxDpmFreq) < 0) ||
-	    (smu_msg_get_index(smu, SMU_MSG_GetMaxDpmFreq) < 0))
+	if ((smu_cmn_to_asic_specific_index(smu, CMN2ASIC_MAPPING_MSG, SMU_MSG_GetDcModeMaxDpmFreq) < 0) ||
+	    (smu_cmn_to_asic_specific_index(smu, CMN2ASIC_MAPPING_MSG, SMU_MSG_GetMaxDpmFreq) < 0))
 		return 0;
-	clk_id = smu_clk_get_index(smu, clock_select);
+	clk_id = smu_cmn_to_asic_specific_index(smu,
+						CMN2ASIC_MAPPING_CLK,
+						clock_select);
 	if (clk_id < 0)
 		return -EINVAL;
@@ -1062,7 +1067,8 @@ int smu_v11_0_get_current_power_limit(struct smu_context *smu,
 	if (!smu_feature_is_enabled(smu, SMU_FEATURE_PPT_BIT))
 		return -EINVAL;
-	power_src = smu_power_get_index(smu,
+	power_src = smu_cmn_to_asic_specific_index(smu,
+						   CMN2ASIC_MAPPING_PWR,
						   smu->adev->pm.ac_power ?
						   SMU_POWER_SOURCE_AC :
						   SMU_POWER_SOURCE_DC);
@@ -1729,7 +1735,9 @@ int smu_v11_0_get_dpm_ultimate_freq(struct smu_context *smu, enum smu_clk_type c
 		return 0;
 	}
-	clk_id = smu_clk_get_index(smu, clk_type);
+	clk_id = smu_cmn_to_asic_specific_index(smu,
+						CMN2ASIC_MAPPING_CLK,
+						clk_type);
 	if (clk_id < 0) {
 		ret = -EINVAL;
 		goto failed;
@@ -1761,7 +1769,9 @@ int smu_v11_0_set_soft_freq_limited_range(struct smu_context *smu,
 	int ret = 0, clk_id = 0;
 	uint32_t param;
-	clk_id = smu_clk_get_index(smu, clk_type);
+	clk_id = smu_cmn_to_asic_specific_index(smu,
+						CMN2ASIC_MAPPING_CLK,
+						clk_type);
 	if (clk_id < 0)
 		return clk_id;
@@ -1805,7 +1815,9 @@ int smu_v11_0_set_hard_freq_limited_range(struct smu_context *smu,
 	if (!smu_clk_dpm_is_enabled(smu, clk_type))
 		return 0;
-	clk_id = smu_clk_get_index(smu, clk_type);
+	clk_id = smu_cmn_to_asic_specific_index(smu,
+						CMN2ASIC_MAPPING_CLK,
+						clk_type);
 	if (clk_id < 0)
 		return clk_id;
@@ -1934,7 +1946,9 @@ int smu_v11_0_set_power_source(struct smu_context *smu,
 {
 	int pwr_source;
-	pwr_source = smu_power_get_index(smu, (uint32_t)power_src);
+	pwr_source = smu_cmn_to_asic_specific_index(smu,
+						    CMN2ASIC_MAPPING_PWR,
+						    (uint32_t)power_src);
 	if (pwr_source < 0)
 		return -EINVAL;
@@ -1958,7 +1972,9 @@ int smu_v11_0_get_dpm_freq_by_index(struct smu_context *smu,
 	if (!smu_clk_dpm_is_enabled(smu, clk_type))
 		return 0;
-	clk_id = smu_clk_get_index(smu, clk_type);
+	clk_id = smu_cmn_to_asic_specific_index(smu,
+						CMN2ASIC_MAPPING_CLK,
+						clk_type);
 	if (clk_id < 0)
 		return clk_id;
...
@@ -29,6 +29,7 @@
 #include "smu_v12_0.h"
 #include "soc15_common.h"
 #include "atom.h"
+#include "smu_cmn.h"
 #include "asic_reg/mp/mp_12_0_0_offset.h"
 #include "asic_reg/mp/mp_12_0_0_sh_mask.h"
@@ -95,7 +96,9 @@ smu_v12_0_send_msg_with_param(struct smu_context *smu,
 	struct amdgpu_device *adev = smu->adev;
 	int ret = 0, index = 0;
-	index = smu_msg_get_index(smu, msg);
+	index = smu_cmn_to_asic_specific_index(smu,
+					       CMN2ASIC_MAPPING_MSG,
+					       msg);
 	if (index < 0)
 		return index;
...