Commit 28a18bab authored by Rex Zhu, committed by Alex Deucher

drm/amd/powerplay: add CG and PG support for carrizo

This adds clock gating and power gating support for Carrizo (CZ): UVD/VCE power gating and DPM control, a set_power_state task table, and the PHM wrapper entry points used to dispatch them.

v2: squash in fixes
Signed-off-by: Rex Zhu <Rex.Zhu@amd.com>
Reviewed-by: Jammy Zhou <Jammy.Zhou@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
parent bdecc20a
......@@ -3,7 +3,8 @@
# It provides the hardware management services for the driver.
HARDWARE_MGR = hwmgr.o processpptables.o functiontables.o \
hardwaremanager.o pp_acpi.o cz_hwmgr.o
hardwaremanager.o pp_acpi.o cz_hwmgr.o \
cz_clockpowergating.o
AMD_PP_HWMGR = $(addprefix $(AMD_PP_PATH)/hwmgr/,$(HARDWARE_MGR))
......
/*
* Copyright 2015 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include "hwmgr.h"
#include "cz_clockpowergating.h"
#include "cz_ppsmc.h"
/* PhyID -> Status Mapping in DDI_PHY_GEN_STATUS
0 GFX0L (3:0), (27:24),
1 GFX0H (7:4), (31:28),
2 GFX1L (3:0), (19:16),
3 GFX1H (7:4), (23:20),
4 DDIL (3:0), (11: 8),
5 DDIH (7:4), (15:12),
6 DDI2L (3:0), ( 3: 0),
7 DDI2H (7:4), ( 7: 4),
*/
#define DDI_PHY_GEN_STATUS_VAL(phyID) (1 << ((3 - ((phyID & 0x07)/2))*8 + (phyID & 0x01)*4))
#define IS_PHY_ID_USED_BY_PLL(PhyID) (((0xF3 & (1 << PhyID)) & 0xFF) ? true : false)
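/*
 * Editor's note - worked example derived from the macros above (not part of
 * the original patch): for phyID 5 (DDIH), DDI_PHY_GEN_STATUS_VAL(5) is
 * 1 << ((3 - 2) * 8 + 1 * 4) == 1 << 12, i.e. the low bit of DDIH's (15:12)
 * nibble. IS_PHY_ID_USED_BY_PLL() masks against 0xF3, so only PhyIDs 2 and 3
 * (GFX1L/GFX1H) return false.
 */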
int cz_phm_set_asic_block_gating(struct pp_hwmgr *hwmgr, enum PHM_AsicBlock block, enum PHM_ClockGateSetting gating)
{
int ret = 0;
switch (block) {
case PHM_AsicBlock_UVD_MVC:
case PHM_AsicBlock_UVD:
case PHM_AsicBlock_UVD_HD:
case PHM_AsicBlock_UVD_SD:
if (gating == PHM_ClockGateSetting_StaticOff)
ret = cz_dpm_powerdown_uvd(hwmgr);
else
ret = cz_dpm_powerup_uvd(hwmgr);
break;
case PHM_AsicBlock_GFX:
default:
break;
}
return ret;
}
bool cz_phm_is_safe_for_asic_block(struct pp_hwmgr *hwmgr, const struct pp_hw_power_state *state, enum PHM_AsicBlock block)
{
return true;
}
int cz_phm_enable_disable_gfx_power_gating(struct pp_hwmgr *hwmgr, bool enable)
{
return 0;
}
int cz_phm_smu_power_up_down_pcie(struct pp_hwmgr *hwmgr, uint32_t target, bool up, uint32_t args)
{
/* TODO */
return 0;
}
int cz_phm_initialize_display_phy_access(struct pp_hwmgr *hwmgr, bool initialize, bool accesshw)
{
/* TODO */
return 0;
}
int cz_phm_get_display_phy_access_info(struct pp_hwmgr *hwmgr)
{
/* TODO */
return 0;
}
int cz_phm_gate_unused_display_phys(struct pp_hwmgr *hwmgr)
{
/* TODO */
return 0;
}
int cz_phm_ungate_all_display_phys(struct pp_hwmgr *hwmgr)
{
/* TODO */
return 0;
}
static int cz_tf_uvd_power_gating_initialize(struct pp_hwmgr *hwmgr, void *pInput, void *pOutput, void *pStorage, int Result)
{
return 0;
}
static int cz_tf_vce_power_gating_initialize(struct pp_hwmgr *hwmgr, void *pInput, void *pOutput, void *pStorage, int Result)
{
return 0;
}
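/*
 * Editor's note: UVD/VCE DPM is toggled by sending the block's feature mask
 * to the SMU via PPSMC_MSG_EnableAllSmuFeatures / PPSMC_MSG_DisableAllSmuFeatures;
 * dpm_flags only mirrors what was last requested.
 */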
int cz_enable_disable_uvd_dpm(struct pp_hwmgr *hwmgr, bool enable)
{
struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
uint32_t dpm_features = 0;
if (enable &&
phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_UVDDPM)) {
cz_hwmgr->dpm_flags |= DPMFlags_UVD_Enabled;
dpm_features |= UVD_DPM_MASK;
smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
PPSMC_MSG_EnableAllSmuFeatures, dpm_features);
} else {
dpm_features |= UVD_DPM_MASK;
cz_hwmgr->dpm_flags &= ~DPMFlags_UVD_Enabled;
smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
PPSMC_MSG_DisableAllSmuFeatures, dpm_features);
}
return 0;
}
int cz_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool enable)
{
struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
uint32_t dpm_features = 0;
if (enable && phm_cap_enabled(
hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_VCEDPM)) {
cz_hwmgr->dpm_flags |= DPMFlags_VCE_Enabled;
dpm_features |= VCE_DPM_MASK;
smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
PPSMC_MSG_EnableAllSmuFeatures, dpm_features);
} else {
dpm_features |= VCE_DPM_MASK;
cz_hwmgr->dpm_flags &= ~DPMFlags_VCE_Enabled;
smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
PPSMC_MSG_DisableAllSmuFeatures, dpm_features);
}
return 0;
}
int cz_dpm_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate)
{
struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
if (cz_hwmgr->uvd_power_gated == bgate)
return 0;
cz_hwmgr->uvd_power_gated = bgate;
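/*
 * Editor's note on the sequence below: when gating, keep the clocks ungated
 * so the block can be quiesced, then gate power and let the SMU power it
 * down; when ungating, power the block up first, then re-enable clock
 * gating and UVD DPM.
 */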
if (bgate) {
cgs_set_clockgating_state(hwmgr->device,
AMD_IP_BLOCK_TYPE_UVD,
AMD_CG_STATE_UNGATE);
cgs_set_powergating_state(hwmgr->device,
AMD_IP_BLOCK_TYPE_UVD,
AMD_PG_STATE_GATE);
cz_dpm_update_uvd_dpm(hwmgr, true);
cz_dpm_powerdown_uvd(hwmgr);
} else {
cz_dpm_powerup_uvd(hwmgr);
cgs_set_clockgating_state(hwmgr->device,
AMD_IP_BLOCK_TYPE_UVD,
AMD_CG_STATE_GATE);
cgs_set_powergating_state(hwmgr->device,
AMD_IP_BLOCK_TYPE_UVD,
AMD_PG_STATE_UNGATE);
cz_dpm_update_uvd_dpm(hwmgr, false);
}
return 0;
}
int cz_dpm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate)
{
struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_VCEPowerGating)) {
if (cz_hwmgr->vce_power_gated != bgate) {
if (bgate) {
cgs_set_clockgating_state(
hwmgr->device,
AMD_IP_BLOCK_TYPE_VCE,
AMD_CG_STATE_UNGATE);
cgs_set_powergating_state(
hwmgr->device,
AMD_IP_BLOCK_TYPE_VCE,
AMD_PG_STATE_GATE);
cz_enable_disable_vce_dpm(hwmgr, false);
/* TODO: figure out why VCE can't be powered off */
cz_hwmgr->vce_power_gated = true;
} else {
cz_dpm_powerup_vce(hwmgr);
cz_hwmgr->vce_power_gated = false;
cgs_set_clockgating_state(
hwmgr->device,
AMD_IP_BLOCK_TYPE_VCE,
AMD_CG_STATE_GATE);
cgs_set_powergating_state(
hwmgr->device,
AMD_IP_BLOCK_TYPE_VCE,
AMD_PG_STATE_UNGATE);
cz_dpm_update_vce_dpm(hwmgr);
cz_enable_disable_vce_dpm(hwmgr, true);
return 0;
}
}
} else {
cz_dpm_update_vce_dpm(hwmgr);
cz_enable_disable_vce_dpm(hwmgr, true);
return 0;
}
if (!cz_hwmgr->vce_power_gated)
cz_dpm_update_vce_dpm(hwmgr);
return 0;
}
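/*
 * Editor's note: each master table item pairs an optional check function
 * (the step is skipped when it returns false) with the task function that
 * phm_construct_table()/phm_dispatch_table() will run.
 */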
static struct phm_master_table_item cz_enable_clock_power_gatings_list[] = {
/*we don't need an exit table here, because there is only D3 cold on Kv*/
{ phm_cf_want_uvd_power_gating, cz_tf_uvd_power_gating_initialize },
{ phm_cf_want_vce_power_gating, cz_tf_vce_power_gating_initialize },
/* to do { NULL, cz_tf_xdma_power_gating_enable }, */
{ NULL, NULL }
};
struct phm_master_table_header cz_phm_enable_clock_power_gatings_master = {
0,
PHM_MasterTableFlag_None,
cz_enable_clock_power_gatings_list
};
/*
* Copyright 2015 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
#ifndef _CZ_CLOCK_POWER_GATING_H_
#define _CZ_CLOCK_POWER_GATING_H_
#include "cz_hwmgr.h"
#include "pp_asicblocks.h"
extern int cz_phm_set_asic_block_gating(struct pp_hwmgr *hwmgr, enum PHM_AsicBlock block, enum PHM_ClockGateSetting gating);
extern struct phm_master_table_header cz_phm_enable_clock_power_gatings_master;
extern struct phm_master_table_header cz_phm_disable_clock_power_gatings_master;
extern int cz_dpm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate);
extern int cz_dpm_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate);
extern int cz_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool enable);
extern int cz_enable_disable_uvd_dpm(struct pp_hwmgr *hwmgr, bool enable);
#endif /* _CZ_CLOCK_POWER_GATING_H_ */
......@@ -28,12 +28,23 @@
#include "processpptables.h"
#include "cgs_common.h"
#include "smu/smu_8_0_d.h"
#include "smu8_fusion.h"
#include "smu/smu_8_0_sh_mask.h"
#include "smumgr.h"
#include "hwmgr.h"
#include "hardwaremanager.h"
#include "cz_ppsmc.h"
#include "cz_hwmgr.h"
#include "power_state.h"
#include "cz_clockpowergating.h"
#define ixSMUSVI_NB_CURRENTVID 0xD8230044
#define CURRENT_NB_VID_MASK 0xff000000
#define CURRENT_NB_VID__SHIFT 24
#define ixSMUSVI_GFX_CURRENTVID 0xD8230048
#define CURRENT_GFX_VID_MASK 0xff000000
#define CURRENT_GFX_VID__SHIFT 24
static const unsigned long PhwCz_Magic = (unsigned long) PHM_Cz_Magic;
......@@ -45,6 +56,46 @@ static struct cz_power_state *cast_PhwCzPowerState(struct pp_hw_power_state *hw_
return (struct cz_power_state *)hw_ps;
}
static const struct cz_power_state *cast_const_PhwCzPowerState(
const struct pp_hw_power_state *hw_ps)
{
if (PhwCz_Magic != hw_ps->magic)
return NULL;
return (struct cz_power_state *)hw_ps;
}
uint32_t cz_get_eclk_level(struct pp_hwmgr *hwmgr,
uint32_t clock, uint32_t msg)
{
int i = 0;
struct phm_vce_clock_voltage_dependency_table *ptable =
hwmgr->dyn_state.vce_clocl_voltage_dependency_table;
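/*
 * Editor's note: the *Min messages scan up for the first level whose ecclk
 * is >= clock, the *Max messages scan down for the first level <= clock.
 * E.g. with levels {100, 200, 300} and clock == 250, a *Min message yields
 * index 2 and a *Max message yields index 1. cz_get_uvd_level() and
 * cz_get_sclk_level() follow the same pattern.
 */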
switch (msg) {
case PPSMC_MSG_SetEclkSoftMin:
case PPSMC_MSG_SetEclkHardMin:
for (i = 0; i < (int)ptable->count; i++) {
if (clock <= ptable->entries[i].ecclk)
break;
}
break;
case PPSMC_MSG_SetEclkSoftMax:
case PPSMC_MSG_SetEclkHardMax:
for (i = ptable->count - 1; i >= 0; i--) {
if (clock >= ptable->entries[i].ecclk)
break;
}
break;
default:
break;
}
return i;
}
static uint32_t cz_get_sclk_level(struct pp_hwmgr *hwmgr,
uint32_t clock, uint32_t msg)
{
......@@ -75,6 +126,37 @@ static uint32_t cz_get_sclk_level(struct pp_hwmgr *hwmgr,
return i;
}
static uint32_t cz_get_uvd_level(struct pp_hwmgr *hwmgr,
uint32_t clock, uint32_t msg)
{
int i = 0;
struct phm_uvd_clock_voltage_dependency_table *ptable =
hwmgr->dyn_state.uvd_clocl_voltage_dependency_table;
switch (msg) {
case PPSMC_MSG_SetUvdSoftMin:
case PPSMC_MSG_SetUvdHardMin:
for (i = 0; i < (int)ptable->count; i++) {
if (clock <= ptable->entries[i].vclk)
break;
}
break;
case PPSMC_MSG_SetUvdSoftMax:
case PPSMC_MSG_SetUvdHardMax:
for (i = ptable->count - 1; i >= 0; i--) {
if (clock >= ptable->entries[i].vclk)
break;
}
break;
default:
break;
}
return i;
}
static uint32_t cz_get_max_sclk_level(struct pp_hwmgr *hwmgr)
{
struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
......@@ -504,6 +586,175 @@ static int cz_tf_init_sclk_threshold(struct pp_hwmgr *hwmgr, void *input,
return 0;
}
static int cz_tf_update_sclk_limit(struct pp_hwmgr *hwmgr,
void *input, void *output,
void *storage, int result)
{
struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
struct phm_clock_voltage_dependency_table *table =
hwmgr->dyn_state.vddc_dependency_on_sclk;
unsigned long clock = 0;
unsigned long level;
unsigned long stable_pstate_sclk;
struct PP_Clocks clocks;
unsigned long percentage;
cz_hwmgr->sclk_dpm.soft_min_clk = table->entries[0].clk;
level = cz_get_max_sclk_level(hwmgr) - 1;
if (level < table->count)
cz_hwmgr->sclk_dpm.soft_max_clk = table->entries[level].clk;
else
cz_hwmgr->sclk_dpm.soft_max_clk = table->entries[table->count - 1].clk;
/*PECI_GetMinClockSettings(pHwMgr->pPECI, &clocks);*/
clock = clocks.engineClock;
if (cz_hwmgr->sclk_dpm.hard_min_clk != clock) {
cz_hwmgr->sclk_dpm.hard_min_clk = clock;
smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
PPSMC_MSG_SetSclkHardMin,
cz_get_sclk_level(hwmgr,
cz_hwmgr->sclk_dpm.hard_min_clk,
PPSMC_MSG_SetSclkHardMin));
}
clock = cz_hwmgr->sclk_dpm.soft_min_clk;
/* update minimum clocks for Stable P-State feature */
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_StablePState)) {
percentage = 75;
/*Sclk - calculate sclk value based on percentage and find FLOOR sclk from VddcDependencyOnSCLK table */
stable_pstate_sclk = (hwmgr->dyn_state.max_clock_voltage_on_ac.mclk *
percentage) / 100;
if (clock < stable_pstate_sclk)
clock = stable_pstate_sclk;
} else {
if (clock < hwmgr->gfx_arbiter.sclk)
clock = hwmgr->gfx_arbiter.sclk;
}
if (cz_hwmgr->sclk_dpm.soft_min_clk != clock) {
cz_hwmgr->sclk_dpm.soft_min_clk = clock;
smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
PPSMC_MSG_SetSclkSoftMin,
cz_get_sclk_level(hwmgr,
cz_hwmgr->sclk_dpm.soft_min_clk,
PPSMC_MSG_SetSclkSoftMin));
}
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_StablePState) &&
cz_hwmgr->sclk_dpm.soft_max_clk != clock) {
cz_hwmgr->sclk_dpm.soft_max_clk = clock;
smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
PPSMC_MSG_SetSclkSoftMax,
cz_get_sclk_level(hwmgr,
cz_hwmgr->sclk_dpm.soft_max_clk,
PPSMC_MSG_SetSclkSoftMax));
}
return 0;
}
static int cz_tf_set_deep_sleep_sclk_threshold(struct pp_hwmgr *hwmgr,
void *input, void *output,
void *storage, int result)
{
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_SclkDeepSleep)) {
/* TO DO get from dal PECI_GetMinClockSettings(pHwMgr->pPECI, &clocks); */
smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
PPSMC_MSG_SetMinDeepSleepSclk,
CZ_MIN_DEEP_SLEEP_SCLK);
}
return 0;
}
static int cz_tf_set_watermark_threshold(struct pp_hwmgr *hwmgr,
void *input, void *output,
void *storage, int result)
{
struct cz_hwmgr *cz_hwmgr =
(struct cz_hwmgr *)(hwmgr->backend);
smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
PPSMC_MSG_SetWatermarkFrequency,
cz_hwmgr->sclk_dpm.soft_max_clk);
return 0;
}
static int cz_tf_set_enabled_levels(struct pp_hwmgr *hwmgr,
void *input, void *output,
void *storage, int result)
{
return 0;
}
static int cz_tf_enable_nb_dpm(struct pp_hwmgr *hwmgr,
void *input, void *output,
void *storage, int result)
{
int ret = 0;
struct cz_hwmgr *cz_hwmgr =
(struct cz_hwmgr *)(hwmgr->backend);
unsigned long dpm_features = 0;
if (!cz_hwmgr->is_nb_dpm_enabled &&
cz_hwmgr->is_nb_dpm_enabled_by_driver) { /* also depends on dal NBPStateDisableRequired */
dpm_features |= NB_DPM_MASK;
ret = smum_send_msg_to_smc_with_parameter(
hwmgr->smumgr,
PPSMC_MSG_EnableAllSmuFeatures,
dpm_features);
if (ret == 0)
cz_hwmgr->is_nb_dpm_enabled = true;
}
return ret;
}
static int cz_tf_update_low_mem_pstate(struct pp_hwmgr *hwmgr,
void *input, void *output,
void *storage, int result)
{
struct cz_hwmgr *cz_hwmgr =
(struct cz_hwmgr *)(hwmgr->backend);
const struct phm_set_power_state_input *states = (struct phm_set_power_state_input *)input;
const struct cz_power_state *pnew_state = cast_const_PhwCzPowerState(states->pnew_state);
if (cz_hwmgr->sys_info.nb_dpm_enable) {
if (pnew_state->action == FORCE_HIGH)
smum_send_msg_to_smc(hwmgr->smumgr,
PPSMC_MSG_DisableLowMemoryPstate);
else
smum_send_msg_to_smc(hwmgr->smumgr,
PPSMC_MSG_EnableLowMemoryPstate);
}
return 0;
}
static struct phm_master_table_item cz_set_power_state_list[] = {
{NULL, cz_tf_update_sclk_limit},
{NULL, cz_tf_set_deep_sleep_sclk_threshold},
{NULL, cz_tf_set_watermark_threshold},
{NULL, cz_tf_set_enabled_levels},
{NULL, cz_tf_enable_nb_dpm},
{NULL, cz_tf_update_low_mem_pstate},
{NULL, NULL}
};
static struct phm_master_table_header cz_set_power_state_master = {
0,
PHM_MasterTableFlag_None,
cz_set_power_state_list
};
static struct phm_master_table_item cz_setup_asic_list[] = {
{NULL, cz_tf_reset_active_process_mask},
......@@ -649,6 +900,56 @@ static struct phm_master_table_header cz_enable_dpm_master = {
cz_enable_dpm_list
};
static int cz_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
struct pp_power_state *prequest_ps,
const struct pp_power_state *pcurrent_ps)
{
struct cz_power_state *cz_ps =
cast_PhwCzPowerState(&prequest_ps->hardware);
const struct cz_power_state *cz_current_ps =
cast_const_PhwCzPowerState(&pcurrent_ps->hardware);
struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
struct PP_Clocks clocks;
bool force_high;
unsigned long num_of_active_displays = 4;
cz_ps->evclk = hwmgr->vce_arbiter.evclk;
cz_ps->ecclk = hwmgr->vce_arbiter.ecclk;
cz_ps->need_dfs_bypass = true;
cz_hwmgr->video_start = (hwmgr->uvd_arbiter.vclk != 0 || hwmgr->uvd_arbiter.dclk != 0 ||
hwmgr->vce_arbiter.evclk != 0 || hwmgr->vce_arbiter.ecclk != 0);
cz_hwmgr->battery_state = (PP_StateUILabel_Battery == prequest_ps->classification.ui_label);
/* to do PECI_GetMinClockSettings(pHwMgr->pPECI, &clocks); */
/* PECI_GetNumberOfActiveDisplays(pHwMgr->pPECI, &numOfActiveDisplays); */
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_StablePState))
clocks.memoryClock = hwmgr->dyn_state.max_clock_voltage_on_ac.mclk;
else
clocks.memoryClock = 0;
if (clocks.memoryClock < hwmgr->gfx_arbiter.mclk)
clocks.memoryClock = hwmgr->gfx_arbiter.mclk;
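/*
 * Editor's note on the check below: the high NB P-state is forced when the
 * required memory clock exceeds the highest NBP memory clock level, or when
 * three or more displays are active.
 */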
force_high = (clocks.memoryClock > cz_hwmgr->sys_info.nbp_memory_clock[CZ_NUM_NBPMEMORYCLOCK - 1])
|| (num_of_active_displays >= 3);
cz_ps->action = cz_current_ps->action;
if ((force_high == false) && (cz_ps->action == FORCE_HIGH))
cz_ps->action = CANCEL_FORCE_HIGH;
else if ((force_high == true) && (cz_ps->action != FORCE_HIGH))
cz_ps->action = FORCE_HIGH;
else
cz_ps->action = DO_NOTHING;
return 0;
}
static int cz_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
{
int result = 0;
......@@ -676,10 +977,28 @@ static int cz_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
result = phm_construct_table(hwmgr, &cz_disable_dpm_master,
&(hwmgr->disable_dynamic_state_management));
if (result != 0) {
printk(KERN_ERR "[ powerplay ] Fail to disable_dynamic_state\n");
return result;
}
result = phm_construct_table(hwmgr, &cz_enable_dpm_master,
&(hwmgr->enable_dynamic_state_management));
if (result != 0) {
printk(KERN_ERR "[ powerplay ] Fail to enable_dynamic_state\n");
return result;
}
result = phm_construct_table(hwmgr, &cz_set_power_state_master,
&(hwmgr->set_power_state));
if (result != 0) {
printk(KERN_ERR "[ powerplay ] Fail to construct set_power_state\n");
return result;
}
result = phm_construct_table(hwmgr, &cz_phm_enable_clock_power_gatings_master, &(hwmgr->enable_clock_power_gatings));
if (result != 0) {
printk(KERN_ERR "[ powerplay ] Fail to construct enable_clock_power_gatings\n");
return result;
}
return result;
}
......@@ -793,6 +1112,138 @@ static int cz_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
return ret;
}
int cz_dpm_powerdown_uvd(struct pp_hwmgr *hwmgr)
{
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_UVDPowerGating))
return smum_send_msg_to_smc(hwmgr->smumgr,
PPSMC_MSG_UVDPowerOFF);
return 0;
}
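/*
 * Editor's note: the PPSMC_MSG_UVDPowerON argument selects dynamic (1) vs.
 * static (0) UVD power gating, depending on the UVDDynamicPowerGating cap.
 */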
int cz_dpm_powerup_uvd(struct pp_hwmgr *hwmgr)
{
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_UVDPowerGating)) {
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_UVDDynamicPowerGating)) {
return smum_send_msg_to_smc_with_parameter(
hwmgr->smumgr,
PPSMC_MSG_UVDPowerON, 1);
} else {
return smum_send_msg_to_smc_with_parameter(
hwmgr->smumgr,
PPSMC_MSG_UVDPowerON, 0);
}
}
return 0;
}
int cz_dpm_update_uvd_dpm(struct pp_hwmgr *hwmgr, bool bgate)
{
struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
struct phm_uvd_clock_voltage_dependency_table *ptable =
hwmgr->dyn_state.uvd_clocl_voltage_dependency_table;
if (!bgate) {
/* Stable Pstate is enabled and we need to set the UVD DPM to highest level */
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_StablePState)) {
cz_hwmgr->uvd_dpm.hard_min_clk =
ptable->entries[ptable->count - 1].vclk;
smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
PPSMC_MSG_SetUvdHardMin,
cz_get_uvd_level(hwmgr,
cz_hwmgr->uvd_dpm.hard_min_clk,
PPSMC_MSG_SetUvdHardMin));
cz_enable_disable_uvd_dpm(hwmgr, true);
} else
cz_enable_disable_uvd_dpm(hwmgr, true);
} else
cz_enable_disable_uvd_dpm(hwmgr, false);
return 0;
}
int cz_dpm_update_vce_dpm(struct pp_hwmgr *hwmgr)
{
struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
struct phm_vce_clock_voltage_dependency_table *ptable =
hwmgr->dyn_state.vce_clocl_voltage_dependency_table;
/* Stable Pstate is enabled and we need to set the VCE DPM to highest level */
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_StablePState)) {
cz_hwmgr->vce_dpm.hard_min_clk =
ptable->entries[ptable->count - 1].ecclk;
smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
PPSMC_MSG_SetEclkHardMin,
cz_get_eclk_level(hwmgr,
cz_hwmgr->vce_dpm.hard_min_clk,
PPSMC_MSG_SetEclkHardMin));
} else {
/* EPR# 419220 - HW limitation */
cz_hwmgr->vce_dpm.hard_min_clk = hwmgr->vce_arbiter.ecclk;
smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
PPSMC_MSG_SetEclkHardMin,
cz_get_eclk_level(hwmgr,
cz_hwmgr->vce_dpm.hard_min_clk,
PPSMC_MSG_SetEclkHardMin));
}
return 0;
}
int cz_dpm_powerdown_vce(struct pp_hwmgr *hwmgr)
{
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_VCEPowerGating))
return smum_send_msg_to_smc(hwmgr->smumgr,
PPSMC_MSG_VCEPowerOFF);
return 0;
}
int cz_dpm_powerup_vce(struct pp_hwmgr *hwmgr)
{
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_VCEPowerGating))
return smum_send_msg_to_smc(hwmgr->smumgr,
PPSMC_MSG_VCEPowerON);
return 0;
}
static int cz_dpm_get_mclk(struct pp_hwmgr *hwmgr, bool low)
{
struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
return cz_hwmgr->sys_info.bootup_uma_clock;
}
static int cz_dpm_get_sclk(struct pp_hwmgr *hwmgr, bool low)
{
struct pp_power_state *ps;
struct cz_power_state *cz_ps;
if (hwmgr == NULL)
return -EINVAL;
ps = hwmgr->request_ps;
if (ps == NULL)
return -EINVAL;
cz_ps = cast_PhwCzPowerState(&ps->hardware);
if (low)
return cz_ps->levels[0].engineClock;
else
return cz_ps->levels[cz_ps->level-1].engineClock;
}
static int cz_dpm_patch_boot_state(struct pp_hwmgr *hwmgr,
struct pp_hw_power_state *hw_ps)
{
......@@ -871,15 +1322,83 @@ int cz_get_power_state_size(struct pp_hwmgr *hwmgr)
return sizeof(struct cz_power_state);
}
static void
cz_print_current_perforce_level(struct pp_hwmgr *hwmgr, struct seq_file *m)
{
struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
struct phm_clock_voltage_dependency_table *table =
hwmgr->dyn_state.vddc_dependency_on_sclk;
struct phm_vce_clock_voltage_dependency_table *vce_table =
hwmgr->dyn_state.vce_clocl_voltage_dependency_table;
struct phm_uvd_clock_voltage_dependency_table *uvd_table =
hwmgr->dyn_state.uvd_clocl_voltage_dependency_table;
uint32_t sclk_index = PHM_GET_FIELD(cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixTARGET_AND_CURRENT_PROFILE_INDEX),
TARGET_AND_CURRENT_PROFILE_INDEX, CURR_SCLK_INDEX);
uint32_t uvd_index = PHM_GET_FIELD(cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixTARGET_AND_CURRENT_PROFILE_INDEX_2),
TARGET_AND_CURRENT_PROFILE_INDEX_2, CURR_UVD_INDEX);
uint32_t vce_index = PHM_GET_FIELD(cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixTARGET_AND_CURRENT_PROFILE_INDEX_2),
TARGET_AND_CURRENT_PROFILE_INDEX_2, CURR_VCE_INDEX);
uint32_t sclk, vclk, dclk, ecclk, tmp;
uint16_t vddnb, vddgfx;
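/*
 * Editor's note: dependency-table clocks are in 10 kHz units, hence the /100
 * when printing MHz; the raw VID fields are translated through
 * cz_convert_8Bit_index_to_voltage().
 */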
if (sclk_index >= NUM_SCLK_LEVELS) {
seq_printf(m, "\n invalid sclk dpm profile %d\n", sclk_index);
} else {
sclk = table->entries[sclk_index].clk;
seq_printf(m, "\n index: %u sclk: %u MHz\n", sclk_index, sclk/100);
}
tmp = (cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixSMUSVI_NB_CURRENTVID) &
CURRENT_NB_VID_MASK) >> CURRENT_NB_VID__SHIFT;
vddnb = cz_convert_8Bit_index_to_voltage(hwmgr, tmp);
tmp = (cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixSMUSVI_GFX_CURRENTVID) &
CURRENT_GFX_VID_MASK) >> CURRENT_GFX_VID__SHIFT;
vddgfx = cz_convert_8Bit_index_to_voltage(hwmgr, (u16)tmp);
seq_printf(m, "\n vddnb: %u vddgfx: %u\n", vddnb, vddgfx);
seq_printf(m, "\n uvd %sabled\n", cz_hwmgr->uvd_power_gated ? "dis" : "en");
if (!cz_hwmgr->uvd_power_gated) {
if (uvd_index >= CZ_MAX_HARDWARE_POWERLEVELS) {
seq_printf(m, "\n invalid uvd dpm level %d\n", uvd_index);
} else {
vclk = uvd_table->entries[uvd_index].vclk;
dclk = uvd_table->entries[uvd_index].dclk;
seq_printf(m, "\n index: %u uvd vclk: %u MHz dclk: %u MHz\n", uvd_index, vclk/100, dclk/100);
}
}
seq_printf(m, "\n vce %sabled\n", cz_hwmgr->vce_power_gated ? "dis" : "en");
if (!cz_hwmgr->vce_power_gated) {
if (vce_index >= CZ_MAX_HARDWARE_POWERLEVELS) {
seq_printf(m, "\n invalid vce dpm level %d\n", vce_index);
} else {
ecclk = vce_table->entries[vce_index].ecclk;
seq_printf(m, "\n index: %u vce ecclk: %u MHz\n", vce_index, ecclk/100);
}
}
}
static const struct pp_hwmgr_func cz_hwmgr_funcs = {
.backend_init = cz_hwmgr_backend_init,
.backend_fini = cz_hwmgr_backend_fini,
.asic_setup = NULL,
.apply_state_adjust_rules = cz_apply_state_adjust_rules,
.force_dpm_level = cz_dpm_force_dpm_level,
.get_power_state_size = cz_get_power_state_size,
.powerdown_uvd = cz_dpm_powerdown_uvd,
.powergate_uvd = cz_dpm_powergate_uvd,
.powergate_vce = cz_dpm_powergate_vce,
.get_mclk = cz_dpm_get_mclk,
.get_sclk = cz_dpm_get_sclk,
.patch_boot_state = cz_dpm_patch_boot_state,
.get_pp_table_entry = cz_dpm_get_pp_table_entry,
.get_num_of_pp_table_entries = cz_dpm_get_num_of_pp_table_entries,
.print_current_perforce_level = cz_print_current_perforce_level,
};
int cz_hwmgr_init(struct pp_hwmgr *hwmgr)
......
......@@ -32,6 +32,7 @@
#define CZ_AT_DFLT 30
#define CZ_MAX_HARDWARE_POWERLEVELS 8
#define PPCZ_VOTINGRIGHTSCLIENTS_DFLT0 0x3FFFC102
#define CZ_MIN_DEEP_SLEEP_SCLK 800
/* Carrizo device IDs */
#define DEVICE_ID_CZ_9870 0x9870
......@@ -198,6 +199,9 @@ struct cz_hwmgr {
struct cz_sys_info sys_info;
struct cz_power_level boot_power_level;
struct cz_power_state *cz_current_ps;
struct cz_power_state *cz_requested_ps;
uint32_t mgcg_cgtt_local0;
uint32_t mgcg_cgtt_local1;
......@@ -299,11 +303,15 @@ struct cz_hwmgr {
uint32_t max_sclk_level;
uint32_t num_of_clk_entries;
struct cz_power_state *cz_ps;
};
struct pp_hwmgr;
int cz_hwmgr_init(struct pp_hwmgr *hwmgr);
int cz_dpm_powerdown_uvd(struct pp_hwmgr *hwmgr);
int cz_dpm_powerup_uvd(struct pp_hwmgr *hwmgr);
int cz_dpm_powerdown_vce(struct pp_hwmgr *hwmgr);
int cz_dpm_powerup_vce(struct pp_hwmgr *hwmgr);
int cz_dpm_update_uvd_dpm(struct pp_hwmgr *hwmgr, bool bgate);
int cz_dpm_update_vce_dpm(struct pp_hwmgr *hwmgr);
#endif /* _CZ_HWMGR_H_ */
......@@ -23,6 +23,7 @@
#include <linux/errno.h>
#include "hwmgr.h"
#include "hardwaremanager.h"
#include "power_state.h"
#include "pp_acpi.h"
#include "amd_acpi.h"
......@@ -55,6 +56,17 @@ void phm_init_dynamic_caps(struct pp_hwmgr *hwmgr)
phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_PCIEPerformanceRequest);
}
bool phm_is_hw_access_blocked(struct pp_hwmgr *hwmgr)
{
return hwmgr->block_hw_access;
}
int phm_block_hw_access(struct pp_hwmgr *hwmgr, bool block)
{
hwmgr->block_hw_access = block;
return 0;
}
int phm_setup_asic(struct pp_hwmgr *hwmgr)
{
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
......@@ -62,13 +74,33 @@ int phm_setup_asic(struct pp_hwmgr *hwmgr)
if (NULL != hwmgr->hwmgr_func->asic_setup)
return hwmgr->hwmgr_func->asic_setup(hwmgr);
} else {
return phm_dispatch_table (hwmgr, &(hwmgr->setup_asic),
return phm_dispatch_table(hwmgr, &(hwmgr->setup_asic),
NULL, NULL);
}
return 0;
}
int phm_set_power_state(struct pp_hwmgr *hwmgr,
const struct pp_hw_power_state *pcurrent_state,
const struct pp_hw_power_state *pnew_power_state)
{
struct phm_set_power_state_input states;
states.pcurrent_state = pcurrent_state;
states.pnew_state = pnew_power_state;
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_TablelessHardwareInterface)) {
if (NULL != hwmgr->hwmgr_func->power_state_set)
return hwmgr->hwmgr_func->power_state_set(hwmgr, &states);
} else {
return phm_dispatch_table(hwmgr, &(hwmgr->set_power_state), &states, NULL);
}
return 0;
}
int phm_enable_dynamic_state_management(struct pp_hwmgr *hwmgr)
{
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
......@@ -76,9 +108,62 @@ int phm_enable_dynamic_state_management(struct pp_hwmgr *hwmgr)
if (NULL != hwmgr->hwmgr_func->dynamic_state_management_enable)
return hwmgr->hwmgr_func->dynamic_state_management_enable(hwmgr);
} else {
return phm_dispatch_table (hwmgr,
return phm_dispatch_table(hwmgr,
&(hwmgr->enable_dynamic_state_management),
NULL, NULL);
}
return 0;
}
int phm_force_dpm_levels(struct pp_hwmgr *hwmgr, enum amd_dpm_forced_level level)
{
if (hwmgr->hwmgr_func->force_dpm_level != NULL)
return hwmgr->hwmgr_func->force_dpm_level(hwmgr, level);
return 0;
}
int phm_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
struct pp_power_state *adjusted_ps,
const struct pp_power_state *current_ps)
{
if (hwmgr->hwmgr_func->apply_state_adjust_rules != NULL)
return hwmgr->hwmgr_func->apply_state_adjust_rules(
hwmgr,
adjusted_ps,
current_ps);
return 0;
}
int phm_powerdown_uvd(struct pp_hwmgr *hwmgr)
{
if (hwmgr->hwmgr_func->powerdown_uvd != NULL)
return hwmgr->hwmgr_func->powerdown_uvd(hwmgr);
return 0;
}
int phm_powergate_uvd(struct pp_hwmgr *hwmgr, bool gate)
{
if (hwmgr->hwmgr_func->powergate_uvd != NULL)
return hwmgr->hwmgr_func->powergate_uvd(hwmgr, gate);
return 0;
}
int phm_powergate_vce(struct pp_hwmgr *hwmgr, bool gate)
{
if (hwmgr->hwmgr_func->powergate_vce != NULL)
return hwmgr->hwmgr_func->powergate_vce(hwmgr, gate);
return 0;
}
int phm_enable_clock_power_gatings(struct pp_hwmgr *hwmgr)
{
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_TablelessHardwareInterface)) {
if (NULL != hwmgr->hwmgr_func->enable_clock_power_gating)
return hwmgr->hwmgr_func->enable_clock_power_gating(hwmgr);
} else {
return phm_dispatch_table(hwmgr, &(hwmgr->enable_clock_power_gatings), NULL, NULL);
}
return 0;
}
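/*
 * Editor's sketch (not part of this patch): one way an event manager might
 * combine the wrappers added above when switching power states. hwmgr,
 * pcurrent and pnew are assumed to be set up by the caller.
 */
static int example_power_transition(struct pp_hwmgr *hwmgr,
        const struct pp_hw_power_state *pcurrent,
        const struct pp_hw_power_state *pnew)
{
        int ret;

        /* gate idle multimedia blocks before the switch */
        ret = phm_powergate_uvd(hwmgr, true);
        if (ret)
                return ret;
        ret = phm_powergate_vce(hwmgr, true);
        if (ret)
                return ret;

        /* run the set_power_state table (or the tableless hook) */
        return phm_set_power_state(hwmgr, pcurrent, pnew);
}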
......@@ -201,3 +201,13 @@ void phm_wait_for_indirect_register_unequal(struct pp_hwmgr *hwmgr,
phm_wait_for_register_unequal(hwmgr, indirect_port + 1,
value, mask);
}
bool phm_cf_want_uvd_power_gating(struct pp_hwmgr *hwmgr)
{
return phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_UVDPowerGating);
}
bool phm_cf_want_vce_power_gating(struct pp_hwmgr *hwmgr)
{
return phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_VCEPowerGating);
}
......@@ -23,7 +23,12 @@
#ifndef _HARDWARE_MANAGER_H_
#define _HARDWARE_MANAGER_H_
struct pp_hwmgr;
struct pp_hw_power_state;
struct pp_power_state;
enum amd_dpm_forced_level;
/* Automatic Power State Throttling */
enum PHM_AutoThrottleSource
......@@ -206,6 +211,24 @@ struct pp_hw_descriptor {
uint32_t hw_caps[PHM_MAX_NUM_CAPS_ULONG_ENTRIES];
};
enum PHM_PerformanceLevelDesignation {
PHM_PerformanceLevelDesignation_Activity,
PHM_PerformanceLevelDesignation_PowerContainment
};
typedef enum PHM_PerformanceLevelDesignation PHM_PerformanceLevelDesignation;
struct PHM_PerformanceLevel {
uint32_t coreClock;
uint32_t memory_clock;
uint32_t vddc;
uint32_t vddci;
uint32_t nonLocalMemoryFreq;
uint32_t nonLocalMemoryWidth;
};
typedef struct PHM_PerformanceLevel PHM_PerformanceLevel;
/* Function for setting a platform cap */
static inline void phm_cap_set(uint32_t *caps,
enum phm_platform_caps c)
......@@ -226,6 +249,20 @@ static inline bool phm_cap_enabled(const uint32_t *caps, enum phm_platform_caps
(1UL << (c & (PHM_MAX_NUM_CAPS_BITS_PER_FIELD - 1)))));
}
#define PP_PCIEGenInvalid 0xffff
enum PP_PCIEGen {
PP_PCIEGen1 = 0, /* PCIE 1.0 - Transfer rate of 2.5 GT/s */
PP_PCIEGen2, /*PCIE 2.0 - Transfer rate of 5.0 GT/s */
PP_PCIEGen3 /*PCIE 3.0 - Transfer rate of 8.0 GT/s */
};
typedef enum PP_PCIEGen PP_PCIEGen;
#define PP_Min_PCIEGen PP_PCIEGen1
#define PP_Max_PCIEGen PP_PCIEGen3
#define PP_Min_PCIELane 1
#define PP_Max_PCIELane 32
enum phm_clock_Type {
PHM_DispClock = 1,
PHM_SClock,
......@@ -273,8 +310,22 @@ struct phm_clocks {
uint32_t num_of_entries;
uint32_t clock[MAX_NUM_CLOCKS];
};
extern int phm_enable_clock_power_gatings(struct pp_hwmgr *hwmgr);
extern int phm_powergate_uvd(struct pp_hwmgr *hwmgr, bool gate);
extern int phm_powergate_vce(struct pp_hwmgr *hwmgr, bool gate);
extern int phm_powerdown_uvd(struct pp_hwmgr *hwmgr);
extern int phm_setup_asic(struct pp_hwmgr *hwmgr);
extern int phm_enable_dynamic_state_management(struct pp_hwmgr *hwmgr);
extern void phm_init_dynamic_caps(struct pp_hwmgr *hwmgr);
extern bool phm_is_hw_access_blocked(struct pp_hwmgr *hwmgr);
extern int phm_block_hw_access(struct pp_hwmgr *hwmgr, bool block);
extern int phm_set_power_state(struct pp_hwmgr *hwmgr,
const struct pp_hw_power_state *pcurrent_state,
const struct pp_hw_power_state *pnew_power_state);
extern int phm_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
struct pp_power_state *adjusted_ps,
const struct pp_power_state *current_ps);
extern int phm_force_dpm_levels(struct pp_hwmgr *hwmgr, enum amd_dpm_forced_level level);
#endif /* _HARDWARE_MANAGER_H_ */
......@@ -23,6 +23,7 @@
#ifndef _HWMGR_H_
#define _HWMGR_H_
#include <linux/seq_file.h>
#include "amd_powerplay.h"
#include "pp_instance.h"
#include "hardwaremanager.h"
......@@ -85,6 +86,11 @@ typedef int (*phm_table_function)(struct pp_hwmgr *hwmgr, void *input,
typedef bool (*phm_check_function)(struct pp_hwmgr *hwmgr);
struct phm_set_power_state_input {
const struct pp_hw_power_state *pcurrent_state;
const struct pp_hw_power_state *pnew_state;
};
struct phm_acp_arbiter {
uint32_t acpclk;
};
......@@ -252,11 +258,34 @@ struct pp_hwmgr_func {
int (*backend_fini)(struct pp_hwmgr *hw_mgr);
int (*asic_setup)(struct pp_hwmgr *hw_mgr);
int (*get_power_state_size)(struct pp_hwmgr *hw_mgr);
int (*force_dpm_level)(struct pp_hwmgr *hw_mgr, enum amd_dpm_forced_level level);
int (*dynamic_state_management_enable)(struct pp_hwmgr *hw_mgr);
int (*patch_boot_state)(struct pp_hwmgr *hwmgr, struct pp_hw_power_state *hw_ps);
int (*get_pp_table_entry)(struct pp_hwmgr *hwmgr, unsigned long, struct pp_power_state *);
int (*apply_state_adjust_rules)(struct pp_hwmgr *hwmgr,
struct pp_power_state *prequest_ps,
const struct pp_power_state *pcurrent_ps);
int (*force_dpm_level)(struct pp_hwmgr *hw_mgr,
enum amd_dpm_forced_level level);
int (*dynamic_state_management_enable)(
struct pp_hwmgr *hw_mgr);
int (*patch_boot_state)(struct pp_hwmgr *hwmgr,
struct pp_hw_power_state *hw_ps);
int (*get_pp_table_entry)(struct pp_hwmgr *hwmgr,
unsigned long, struct pp_power_state *);
int (*get_num_of_pp_table_entries)(struct pp_hwmgr *hwmgr);
int (*powerdown_uvd)(struct pp_hwmgr *hwmgr);
int (*powergate_vce)(struct pp_hwmgr *hwmgr, bool bgate);
int (*powergate_uvd)(struct pp_hwmgr *hwmgr, bool bgate);
int (*get_mclk)(struct pp_hwmgr *hwmgr, bool low);
int (*get_sclk)(struct pp_hwmgr *hwmgr, bool low);
int (*power_state_set)(struct pp_hwmgr *hwmgr,
const void *state);
void (*print_current_perforce_level)(struct pp_hwmgr *hwmgr,
struct seq_file *m);
int (*enable_clock_power_gating)(struct pp_hwmgr *hwmgr);
};
struct pp_table_func {
......@@ -416,7 +445,7 @@ struct pp_hwmgr {
struct pp_smumgr *smumgr;
const void *soft_pp_table;
enum amd_dpm_forced_level dpm_level;
bool block_hw_access;
struct phm_gfx_arbiter gfx_arbiter;
struct phm_acp_arbiter acp_arbiter;
struct phm_uvd_arbiter uvd_arbiter;
......@@ -430,6 +459,8 @@ struct pp_hwmgr {
struct phm_runtime_table_header setup_asic;
struct phm_runtime_table_header disable_dynamic_state_management;
struct phm_runtime_table_header enable_dynamic_state_management;
struct phm_runtime_table_header set_power_state;
struct phm_runtime_table_header enable_clock_power_gatings;
const struct pp_hwmgr_func *hwmgr_func;
const struct pp_table_func *pptable_func;
struct pp_power_state *ps;
......@@ -471,6 +502,11 @@ extern void phm_wait_for_indirect_register_unequal(
uint32_t value,
uint32_t mask);
bool phm_cf_want_uvd_power_gating(struct pp_hwmgr *hwmgr);
bool phm_cf_want_vce_power_gating(struct pp_hwmgr *hwmgr);
#define PHM_ENTIRE_REGISTER_MASK 0xFFFFFFFFU
#define PHM_FIELD_SHIFT(reg, field) reg##__##field##__SHIFT
#define PHM_FIELD_MASK(reg, field) reg##__##field##_MASK
......
/*
* Copyright 2015 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
#ifndef PP_ASICBLOCKS_H
#define PP_ASICBLOCKS_H
enum PHM_AsicBlock {
PHM_AsicBlock_GFX,
PHM_AsicBlock_UVD_MVC,
PHM_AsicBlock_UVD,
PHM_AsicBlock_UVD_HD,
PHM_AsicBlock_UVD_SD,
PHM_AsicBlock_Count
};
enum PHM_ClockGateSetting {
PHM_ClockGateSetting_StaticOn,
PHM_ClockGateSetting_StaticOff,
PHM_ClockGateSetting_Dynamic
};
struct phm_asic_blocks {
bool gfx : 1;
bool uvd : 1;
};
#endif