Commit c92a428f authored by Dave Airlie's avatar Dave Airlie

Merge branch 'drm-fixes-4.5' of git://people.freedesktop.org/~agd5f/linux into drm-fixes

radeon and amdgpu fixes for 4.5.  Highlights:
- powerplay fixes for amdgpu
- race fixes in the sub-allocator in radeon and amdgpu
- hibernate fix for amdgpu
- fix a possible circular locking in userptr handling in amdgpu

* 'drm-fixes-4.5' of git://people.freedesktop.org/~agd5f/linux: (21 commits)
  drm/amdgpu: fix issue with overlapping userptrs
  drm/radeon: hold reference to fences in radeon_sa_bo_new
  drm/amdgpu: remove unnecessary forward declaration
  drm/amdgpu: hold reference to fences in amdgpu_sa_bo_new (v2)
  drm/amdgpu: fix s4 resume
  drm/amdgpu/cz: plumb pg flags through to powerplay
  drm/amdgpu/tonga: plumb pg flags through to powerplay
  drma/dmgpu: move cg and pg flags into shared headers
  drm/amdgpu: remove unused cg defines
  drm/amdgpu: add a cgs interface to fetch cg and pg flags
  drm/amd/powerplay/tonga: disable vce pg
  drm/amd/powerplay/tonga: disable uvd pg
  drm/amd/powerplay/cz: disable vce pg
  drm/amd/powerplay/cz: disable uvd pg
  drm/amdgpu: be consistent with uvd cg flags
  drm/amdgpu: clean up vce pg flags for cz/st
  drm/amdgpu: handle vce pg flags properly
  drm/amdgpu: handle uvd pg flags properly
  drm/amdgpu/dpm/ci: switch over to the common pcie caps interface
  drm/amdgpu/cik: don't mess with aspm if gpu is root bus
  ...
parents 388f7b1d cc1de6e8
...@@ -87,6 +87,8 @@ extern int amdgpu_sched_jobs; ...@@ -87,6 +87,8 @@ extern int amdgpu_sched_jobs;
extern int amdgpu_sched_hw_submission; extern int amdgpu_sched_hw_submission;
extern int amdgpu_enable_semaphores; extern int amdgpu_enable_semaphores;
extern int amdgpu_powerplay; extern int amdgpu_powerplay;
extern unsigned amdgpu_pcie_gen_cap;
extern unsigned amdgpu_pcie_lane_cap;
#define AMDGPU_WAIT_IDLE_TIMEOUT_IN_MS 3000 #define AMDGPU_WAIT_IDLE_TIMEOUT_IN_MS 3000
#define AMDGPU_MAX_USEC_TIMEOUT 100000 /* 100 ms */ #define AMDGPU_MAX_USEC_TIMEOUT 100000 /* 100 ms */
...@@ -132,47 +134,6 @@ extern int amdgpu_powerplay; ...@@ -132,47 +134,6 @@ extern int amdgpu_powerplay;
#define AMDGPU_RESET_VCE (1 << 13) #define AMDGPU_RESET_VCE (1 << 13)
#define AMDGPU_RESET_VCE1 (1 << 14) #define AMDGPU_RESET_VCE1 (1 << 14)
/* CG block flags */
#define AMDGPU_CG_BLOCK_GFX (1 << 0)
#define AMDGPU_CG_BLOCK_MC (1 << 1)
#define AMDGPU_CG_BLOCK_SDMA (1 << 2)
#define AMDGPU_CG_BLOCK_UVD (1 << 3)
#define AMDGPU_CG_BLOCK_VCE (1 << 4)
#define AMDGPU_CG_BLOCK_HDP (1 << 5)
#define AMDGPU_CG_BLOCK_BIF (1 << 6)
/* CG flags */
#define AMDGPU_CG_SUPPORT_GFX_MGCG (1 << 0)
#define AMDGPU_CG_SUPPORT_GFX_MGLS (1 << 1)
#define AMDGPU_CG_SUPPORT_GFX_CGCG (1 << 2)
#define AMDGPU_CG_SUPPORT_GFX_CGLS (1 << 3)
#define AMDGPU_CG_SUPPORT_GFX_CGTS (1 << 4)
#define AMDGPU_CG_SUPPORT_GFX_CGTS_LS (1 << 5)
#define AMDGPU_CG_SUPPORT_GFX_CP_LS (1 << 6)
#define AMDGPU_CG_SUPPORT_GFX_RLC_LS (1 << 7)
#define AMDGPU_CG_SUPPORT_MC_LS (1 << 8)
#define AMDGPU_CG_SUPPORT_MC_MGCG (1 << 9)
#define AMDGPU_CG_SUPPORT_SDMA_LS (1 << 10)
#define AMDGPU_CG_SUPPORT_SDMA_MGCG (1 << 11)
#define AMDGPU_CG_SUPPORT_BIF_LS (1 << 12)
#define AMDGPU_CG_SUPPORT_UVD_MGCG (1 << 13)
#define AMDGPU_CG_SUPPORT_VCE_MGCG (1 << 14)
#define AMDGPU_CG_SUPPORT_HDP_LS (1 << 15)
#define AMDGPU_CG_SUPPORT_HDP_MGCG (1 << 16)
/* PG flags */
#define AMDGPU_PG_SUPPORT_GFX_PG (1 << 0)
#define AMDGPU_PG_SUPPORT_GFX_SMG (1 << 1)
#define AMDGPU_PG_SUPPORT_GFX_DMG (1 << 2)
#define AMDGPU_PG_SUPPORT_UVD (1 << 3)
#define AMDGPU_PG_SUPPORT_VCE (1 << 4)
#define AMDGPU_PG_SUPPORT_CP (1 << 5)
#define AMDGPU_PG_SUPPORT_GDS (1 << 6)
#define AMDGPU_PG_SUPPORT_RLC_SMU_HS (1 << 7)
#define AMDGPU_PG_SUPPORT_SDMA (1 << 8)
#define AMDGPU_PG_SUPPORT_ACP (1 << 9)
#define AMDGPU_PG_SUPPORT_SAMU (1 << 10)
/* GFX current status */ /* GFX current status */
#define AMDGPU_GFX_NORMAL_MODE 0x00000000L #define AMDGPU_GFX_NORMAL_MODE 0x00000000L
#define AMDGPU_GFX_SAFE_MODE 0x00000001L #define AMDGPU_GFX_SAFE_MODE 0x00000001L
...@@ -606,8 +567,6 @@ struct amdgpu_sa_manager { ...@@ -606,8 +567,6 @@ struct amdgpu_sa_manager {
uint32_t align; uint32_t align;
}; };
struct amdgpu_sa_bo;
/* sub-allocation buffer */ /* sub-allocation buffer */
struct amdgpu_sa_bo { struct amdgpu_sa_bo {
struct list_head olist; struct list_head olist;
...@@ -2360,6 +2319,8 @@ bool amdgpu_ttm_bo_is_amdgpu_bo(struct ttm_buffer_object *bo); ...@@ -2360,6 +2319,8 @@ bool amdgpu_ttm_bo_is_amdgpu_bo(struct ttm_buffer_object *bo);
int amdgpu_ttm_tt_set_userptr(struct ttm_tt *ttm, uint64_t addr, int amdgpu_ttm_tt_set_userptr(struct ttm_tt *ttm, uint64_t addr,
uint32_t flags); uint32_t flags);
bool amdgpu_ttm_tt_has_userptr(struct ttm_tt *ttm); bool amdgpu_ttm_tt_has_userptr(struct ttm_tt *ttm);
bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start,
unsigned long end);
bool amdgpu_ttm_tt_is_readonly(struct ttm_tt *ttm); bool amdgpu_ttm_tt_is_readonly(struct ttm_tt *ttm);
uint32_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm, uint32_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm,
struct ttm_mem_reg *mem); struct ttm_mem_reg *mem);
......
...@@ -795,6 +795,12 @@ static int amdgpu_cgs_query_system_info(void *cgs_device, ...@@ -795,6 +795,12 @@ static int amdgpu_cgs_query_system_info(void *cgs_device,
case CGS_SYSTEM_INFO_PCIE_MLW: case CGS_SYSTEM_INFO_PCIE_MLW:
sys_info->value = adev->pm.pcie_mlw_mask; sys_info->value = adev->pm.pcie_mlw_mask;
break; break;
case CGS_SYSTEM_INFO_CG_FLAGS:
sys_info->value = adev->cg_flags;
break;
case CGS_SYSTEM_INFO_PG_FLAGS:
sys_info->value = adev->pg_flags;
break;
default: default:
return -ENODEV; return -ENODEV;
} }
......
...@@ -1795,15 +1795,20 @@ int amdgpu_resume_kms(struct drm_device *dev, bool resume, bool fbcon) ...@@ -1795,15 +1795,20 @@ int amdgpu_resume_kms(struct drm_device *dev, bool resume, bool fbcon)
} }
/* post card */ /* post card */
amdgpu_atom_asic_init(adev->mode_info.atom_context); if (!amdgpu_card_posted(adev))
amdgpu_atom_asic_init(adev->mode_info.atom_context);
r = amdgpu_resume(adev); r = amdgpu_resume(adev);
if (r)
DRM_ERROR("amdgpu_resume failed (%d).\n", r);
amdgpu_fence_driver_resume(adev); amdgpu_fence_driver_resume(adev);
r = amdgpu_ib_ring_tests(adev); if (resume) {
if (r) r = amdgpu_ib_ring_tests(adev);
DRM_ERROR("ib ring test failed (%d).\n", r); if (r)
DRM_ERROR("ib ring test failed (%d).\n", r);
}
r = amdgpu_late_init(adev); r = amdgpu_late_init(adev);
if (r) if (r)
...@@ -1933,80 +1938,97 @@ int amdgpu_gpu_reset(struct amdgpu_device *adev) ...@@ -1933,80 +1938,97 @@ int amdgpu_gpu_reset(struct amdgpu_device *adev)
return r; return r;
} }
#define AMDGPU_DEFAULT_PCIE_GEN_MASK 0x30007 /* gen: chipset 1/2, asic 1/2/3 */
#define AMDGPU_DEFAULT_PCIE_MLW_MASK 0x2f0000 /* 1/2/4/8/16 lanes */
void amdgpu_get_pcie_info(struct amdgpu_device *adev) void amdgpu_get_pcie_info(struct amdgpu_device *adev)
{ {
u32 mask; u32 mask;
int ret; int ret;
if (pci_is_root_bus(adev->pdev->bus)) if (amdgpu_pcie_gen_cap)
return; adev->pm.pcie_gen_mask = amdgpu_pcie_gen_cap;
if (amdgpu_pcie_gen2 == 0) if (amdgpu_pcie_lane_cap)
return; adev->pm.pcie_mlw_mask = amdgpu_pcie_lane_cap;
if (adev->flags & AMD_IS_APU) /* covers APUs as well */
if (pci_is_root_bus(adev->pdev->bus)) {
if (adev->pm.pcie_gen_mask == 0)
adev->pm.pcie_gen_mask = AMDGPU_DEFAULT_PCIE_GEN_MASK;
if (adev->pm.pcie_mlw_mask == 0)
adev->pm.pcie_mlw_mask = AMDGPU_DEFAULT_PCIE_MLW_MASK;
return; return;
}
ret = drm_pcie_get_speed_cap_mask(adev->ddev, &mask); if (adev->pm.pcie_gen_mask == 0) {
if (!ret) { ret = drm_pcie_get_speed_cap_mask(adev->ddev, &mask);
adev->pm.pcie_gen_mask = (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 | if (!ret) {
CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 | adev->pm.pcie_gen_mask = (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3); CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);
if (mask & DRM_PCIE_SPEED_25)
adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1; if (mask & DRM_PCIE_SPEED_25)
if (mask & DRM_PCIE_SPEED_50) adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1;
adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2; if (mask & DRM_PCIE_SPEED_50)
if (mask & DRM_PCIE_SPEED_80) adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2;
adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3; if (mask & DRM_PCIE_SPEED_80)
} adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3;
ret = drm_pcie_get_max_link_width(adev->ddev, &mask); } else {
if (!ret) { adev->pm.pcie_gen_mask = AMDGPU_DEFAULT_PCIE_GEN_MASK;
switch (mask) { }
case 32: }
adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X32 | if (adev->pm.pcie_mlw_mask == 0) {
CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 | ret = drm_pcie_get_max_link_width(adev->ddev, &mask);
CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 | if (!ret) {
CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 | switch (mask) {
CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 | case 32:
CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 | adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X32 |
CAIL_PCIE_LINK_WIDTH_SUPPORT_X1); CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
break; CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
case 16: CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 | CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 | CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 | CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 | break;
CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 | case 16:
CAIL_PCIE_LINK_WIDTH_SUPPORT_X1); adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
break; CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
case 12: CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 | CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 | CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 | CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 | break;
CAIL_PCIE_LINK_WIDTH_SUPPORT_X1); case 12:
break; adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
case 8: CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 | CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 | CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 | CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
CAIL_PCIE_LINK_WIDTH_SUPPORT_X1); break;
break; case 8:
case 4: adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 | CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 | CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
CAIL_PCIE_LINK_WIDTH_SUPPORT_X1); CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
break; break;
case 2: case 4:
adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 | adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
CAIL_PCIE_LINK_WIDTH_SUPPORT_X1); CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
break; CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
case 1: break;
adev->pm.pcie_mlw_mask = CAIL_PCIE_LINK_WIDTH_SUPPORT_X1; case 2:
break; adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
default: CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
break; break;
case 1:
adev->pm.pcie_mlw_mask = CAIL_PCIE_LINK_WIDTH_SUPPORT_X1;
break;
default:
break;
}
} else {
adev->pm.pcie_mlw_mask = AMDGPU_DEFAULT_PCIE_MLW_MASK;
} }
} }
} }
......
...@@ -83,6 +83,8 @@ int amdgpu_sched_jobs = 32; ...@@ -83,6 +83,8 @@ int amdgpu_sched_jobs = 32;
int amdgpu_sched_hw_submission = 2; int amdgpu_sched_hw_submission = 2;
int amdgpu_enable_semaphores = 0; int amdgpu_enable_semaphores = 0;
int amdgpu_powerplay = -1; int amdgpu_powerplay = -1;
unsigned amdgpu_pcie_gen_cap = 0;
unsigned amdgpu_pcie_lane_cap = 0;
MODULE_PARM_DESC(vramlimit, "Restrict VRAM for testing, in megabytes"); MODULE_PARM_DESC(vramlimit, "Restrict VRAM for testing, in megabytes");
module_param_named(vramlimit, amdgpu_vram_limit, int, 0600); module_param_named(vramlimit, amdgpu_vram_limit, int, 0600);
...@@ -170,6 +172,12 @@ MODULE_PARM_DESC(powerplay, "Powerplay component (1 = enable, 0 = disable, -1 = ...@@ -170,6 +172,12 @@ MODULE_PARM_DESC(powerplay, "Powerplay component (1 = enable, 0 = disable, -1 =
module_param_named(powerplay, amdgpu_powerplay, int, 0444); module_param_named(powerplay, amdgpu_powerplay, int, 0444);
#endif #endif
MODULE_PARM_DESC(pcie_gen_cap, "PCIE Gen Caps (0: autodetect (default))");
module_param_named(pcie_gen_cap, amdgpu_pcie_gen_cap, uint, 0444);
MODULE_PARM_DESC(pcie_lane_cap, "PCIE Lane Caps (0: autodetect (default))");
module_param_named(pcie_lane_cap, amdgpu_pcie_lane_cap, uint, 0444);
static struct pci_device_id pciidlist[] = { static struct pci_device_id pciidlist[] = {
#ifdef CONFIG_DRM_AMDGPU_CIK #ifdef CONFIG_DRM_AMDGPU_CIK
/* Kaveri */ /* Kaveri */
......
...@@ -142,7 +142,8 @@ static void amdgpu_mn_invalidate_range_start(struct mmu_notifier *mn, ...@@ -142,7 +142,8 @@ static void amdgpu_mn_invalidate_range_start(struct mmu_notifier *mn,
list_for_each_entry(bo, &node->bos, mn_list) { list_for_each_entry(bo, &node->bos, mn_list) {
if (!bo->tbo.ttm || bo->tbo.ttm->state != tt_bound) if (!amdgpu_ttm_tt_affect_userptr(bo->tbo.ttm, start,
end))
continue; continue;
r = amdgpu_bo_reserve(bo, true); r = amdgpu_bo_reserve(bo, true);
......
...@@ -354,12 +354,15 @@ int amdgpu_sa_bo_new(struct amdgpu_sa_manager *sa_manager, ...@@ -354,12 +354,15 @@ int amdgpu_sa_bo_new(struct amdgpu_sa_manager *sa_manager,
for (i = 0, count = 0; i < AMDGPU_MAX_RINGS; ++i) for (i = 0, count = 0; i < AMDGPU_MAX_RINGS; ++i)
if (fences[i]) if (fences[i])
fences[count++] = fences[i]; fences[count++] = fence_get(fences[i]);
if (count) { if (count) {
spin_unlock(&sa_manager->wq.lock); spin_unlock(&sa_manager->wq.lock);
t = fence_wait_any_timeout(fences, count, false, t = fence_wait_any_timeout(fences, count, false,
MAX_SCHEDULE_TIMEOUT); MAX_SCHEDULE_TIMEOUT);
for (i = 0; i < count; ++i)
fence_put(fences[i]);
r = (t > 0) ? 0 : t; r = (t > 0) ? 0 : t;
spin_lock(&sa_manager->wq.lock); spin_lock(&sa_manager->wq.lock);
} else { } else {
......
...@@ -783,6 +783,25 @@ bool amdgpu_ttm_tt_has_userptr(struct ttm_tt *ttm) ...@@ -783,6 +783,25 @@ bool amdgpu_ttm_tt_has_userptr(struct ttm_tt *ttm)
return !!gtt->userptr; return !!gtt->userptr;
} }
/*
 * amdgpu_ttm_tt_affect_userptr - test whether a userptr TTM overlaps a range
 *
 * @ttm:   the ttm_tt object to check (may be NULL)
 * @start: start of the CPU address range being invalidated
 * @end:   end of the CPU address range being invalidated
 *
 * Returns true only when @ttm is a bound userptr object whose backing user
 * mapping [userptr, userptr + num_pages * PAGE_SIZE) intersects the given
 * range.  Used by the MMU-notifier invalidate path so buffers untouched by
 * an invalidation can be skipped without reserving them.
 */
bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start,
unsigned long end)
{
struct amdgpu_ttm_tt *gtt = (void *)ttm;
unsigned long size;
/* No ttm_tt object at all -> cannot be affected. */
if (gtt == NULL)
return false;
/* Only bound userptr objects map a CPU address range. */
if (gtt->ttm.ttm.state != tt_bound || !gtt->userptr)
return false;
/* Byte length of the user mapping backing this ttm. */
size = (unsigned long)gtt->ttm.ttm.num_pages * PAGE_SIZE;
/*
 * Disjoint-range rejection: @end is treated as inclusive
 * (userptr > end) while @start is compared exclusively
 * (userptr + size <= start).  NOTE(review): confirm this
 * open/closed convention matches the caller's notifier range.
 */
if (gtt->userptr > end || gtt->userptr + size <= start)
return false;
return true;
}
bool amdgpu_ttm_tt_is_readonly(struct ttm_tt *ttm) bool amdgpu_ttm_tt_is_readonly(struct ttm_tt *ttm)
{ {
struct amdgpu_ttm_tt *gtt = (void *)ttm; struct amdgpu_ttm_tt *gtt = (void *)ttm;
......
...@@ -31,6 +31,7 @@ ...@@ -31,6 +31,7 @@
#include "ci_dpm.h" #include "ci_dpm.h"
#include "gfx_v7_0.h" #include "gfx_v7_0.h"
#include "atom.h" #include "atom.h"
#include "amd_pcie.h"
#include <linux/seq_file.h> #include <linux/seq_file.h>
#include "smu/smu_7_0_1_d.h" #include "smu/smu_7_0_1_d.h"
...@@ -5835,18 +5836,16 @@ static int ci_dpm_init(struct amdgpu_device *adev) ...@@ -5835,18 +5836,16 @@ static int ci_dpm_init(struct amdgpu_device *adev)
u8 frev, crev; u8 frev, crev;
struct ci_power_info *pi; struct ci_power_info *pi;
int ret; int ret;
u32 mask;
pi = kzalloc(sizeof(struct ci_power_info), GFP_KERNEL); pi = kzalloc(sizeof(struct ci_power_info), GFP_KERNEL);
if (pi == NULL) if (pi == NULL)
return -ENOMEM; return -ENOMEM;
adev->pm.dpm.priv = pi; adev->pm.dpm.priv = pi;
ret = drm_pcie_get_speed_cap_mask(adev->ddev, &mask); pi->sys_pcie_mask =
if (ret) (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_MASK) >>
pi->sys_pcie_mask = 0; CAIL_PCIE_LINK_SPEED_SUPPORT_SHIFT;
else
pi->sys_pcie_mask = mask;
pi->force_pcie_gen = AMDGPU_PCIE_GEN_INVALID; pi->force_pcie_gen = AMDGPU_PCIE_GEN_INVALID;
pi->pcie_gen_performance.max = AMDGPU_PCIE_GEN1; pi->pcie_gen_performance.max = AMDGPU_PCIE_GEN1;
......
...@@ -1762,6 +1762,9 @@ static void cik_program_aspm(struct amdgpu_device *adev) ...@@ -1762,6 +1762,9 @@ static void cik_program_aspm(struct amdgpu_device *adev)
if (amdgpu_aspm == 0) if (amdgpu_aspm == 0)
return; return;
if (pci_is_root_bus(adev->pdev->bus))
return;
/* XXX double check APUs */ /* XXX double check APUs */
if (adev->flags & AMD_IS_APU) if (adev->flags & AMD_IS_APU)
return; return;
...@@ -2332,72 +2335,72 @@ static int cik_common_early_init(void *handle) ...@@ -2332,72 +2335,72 @@ static int cik_common_early_init(void *handle)
switch (adev->asic_type) { switch (adev->asic_type) {
case CHIP_BONAIRE: case CHIP_BONAIRE:
adev->cg_flags = adev->cg_flags =
AMDGPU_CG_SUPPORT_GFX_MGCG | AMD_CG_SUPPORT_GFX_MGCG |
AMDGPU_CG_SUPPORT_GFX_MGLS | AMD_CG_SUPPORT_GFX_MGLS |
/*AMDGPU_CG_SUPPORT_GFX_CGCG |*/ /*AMD_CG_SUPPORT_GFX_CGCG |*/
AMDGPU_CG_SUPPORT_GFX_CGLS | AMD_CG_SUPPORT_GFX_CGLS |
AMDGPU_CG_SUPPORT_GFX_CGTS | AMD_CG_SUPPORT_GFX_CGTS |
AMDGPU_CG_SUPPORT_GFX_CGTS_LS | AMD_CG_SUPPORT_GFX_CGTS_LS |
AMDGPU_CG_SUPPORT_GFX_CP_LS | AMD_CG_SUPPORT_GFX_CP_LS |
AMDGPU_CG_SUPPORT_MC_LS | AMD_CG_SUPPORT_MC_LS |
AMDGPU_CG_SUPPORT_MC_MGCG | AMD_CG_SUPPORT_MC_MGCG |
AMDGPU_CG_SUPPORT_SDMA_MGCG | AMD_CG_SUPPORT_SDMA_MGCG |
AMDGPU_CG_SUPPORT_SDMA_LS | AMD_CG_SUPPORT_SDMA_LS |
AMDGPU_CG_SUPPORT_BIF_LS | AMD_CG_SUPPORT_BIF_LS |
AMDGPU_CG_SUPPORT_VCE_MGCG | AMD_CG_SUPPORT_VCE_MGCG |
AMDGPU_CG_SUPPORT_UVD_MGCG | AMD_CG_SUPPORT_UVD_MGCG |
AMDGPU_CG_SUPPORT_HDP_LS | AMD_CG_SUPPORT_HDP_LS |
AMDGPU_CG_SUPPORT_HDP_MGCG; AMD_CG_SUPPORT_HDP_MGCG;
adev->pg_flags = 0; adev->pg_flags = 0;
adev->external_rev_id = adev->rev_id + 0x14; adev->external_rev_id = adev->rev_id + 0x14;
break; break;
case CHIP_HAWAII: case CHIP_HAWAII:
adev->cg_flags = adev->cg_flags =
AMDGPU_CG_SUPPORT_GFX_MGCG | AMD_CG_SUPPORT_GFX_MGCG |
AMDGPU_CG_SUPPORT_GFX_MGLS | AMD_CG_SUPPORT_GFX_MGLS |
/*AMDGPU_CG_SUPPORT_GFX_CGCG |*/ /*AMD_CG_SUPPORT_GFX_CGCG |*/
AMDGPU_CG_SUPPORT_GFX_CGLS | AMD_CG_SUPPORT_GFX_CGLS |
AMDGPU_CG_SUPPORT_GFX_CGTS | AMD_CG_SUPPORT_GFX_CGTS |
AMDGPU_CG_SUPPORT_GFX_CP_LS | AMD_CG_SUPPORT_GFX_CP_LS |
AMDGPU_CG_SUPPORT_MC_LS | AMD_CG_SUPPORT_MC_LS |
AMDGPU_CG_SUPPORT_MC_MGCG | AMD_CG_SUPPORT_MC_MGCG |
AMDGPU_CG_SUPPORT_SDMA_MGCG | AMD_CG_SUPPORT_SDMA_MGCG |
AMDGPU_CG_SUPPORT_SDMA_LS | AMD_CG_SUPPORT_SDMA_LS |
AMDGPU_CG_SUPPORT_BIF_LS | AMD_CG_SUPPORT_BIF_LS |
AMDGPU_CG_SUPPORT_VCE_MGCG | AMD_CG_SUPPORT_VCE_MGCG |
AMDGPU_CG_SUPPORT_UVD_MGCG | AMD_CG_SUPPORT_UVD_MGCG |
AMDGPU_CG_SUPPORT_HDP_LS | AMD_CG_SUPPORT_HDP_LS |
AMDGPU_CG_SUPPORT_HDP_MGCG; AMD_CG_SUPPORT_HDP_MGCG;
adev->pg_flags = 0; adev->pg_flags = 0;
adev->external_rev_id = 0x28; adev->external_rev_id = 0x28;
break; break;
case CHIP_KAVERI: case CHIP_KAVERI:
adev->cg_flags = adev->cg_flags =
AMDGPU_CG_SUPPORT_GFX_MGCG | AMD_CG_SUPPORT_GFX_MGCG |
AMDGPU_CG_SUPPORT_GFX_MGLS | AMD_CG_SUPPORT_GFX_MGLS |
/*AMDGPU_CG_SUPPORT_GFX_CGCG |*/ /*AMD_CG_SUPPORT_GFX_CGCG |*/
AMDGPU_CG_SUPPORT_GFX_CGLS | AMD_CG_SUPPORT_GFX_CGLS |
AMDGPU_CG_SUPPORT_GFX_CGTS | AMD_CG_SUPPORT_GFX_CGTS |
AMDGPU_CG_SUPPORT_GFX_CGTS_LS | AMD_CG_SUPPORT_GFX_CGTS_LS |
AMDGPU_CG_SUPPORT_GFX_CP_LS | AMD_CG_SUPPORT_GFX_CP_LS |
AMDGPU_CG_SUPPORT_SDMA_MGCG | AMD_CG_SUPPORT_SDMA_MGCG |
AMDGPU_CG_SUPPORT_SDMA_LS | AMD_CG_SUPPORT_SDMA_LS |
AMDGPU_CG_SUPPORT_BIF_LS | AMD_CG_SUPPORT_BIF_LS |
AMDGPU_CG_SUPPORT_VCE_MGCG | AMD_CG_SUPPORT_VCE_MGCG |
AMDGPU_CG_SUPPORT_UVD_MGCG | AMD_CG_SUPPORT_UVD_MGCG |
AMDGPU_CG_SUPPORT_HDP_LS | AMD_CG_SUPPORT_HDP_LS |
AMDGPU_CG_SUPPORT_HDP_MGCG; AMD_CG_SUPPORT_HDP_MGCG;
adev->pg_flags = adev->pg_flags =
/*AMDGPU_PG_SUPPORT_GFX_PG | /*AMD_PG_SUPPORT_GFX_PG |
AMDGPU_PG_SUPPORT_GFX_SMG | AMD_PG_SUPPORT_GFX_SMG |
AMDGPU_PG_SUPPORT_GFX_DMG |*/ AMD_PG_SUPPORT_GFX_DMG |*/
AMDGPU_PG_SUPPORT_UVD | AMD_PG_SUPPORT_UVD |
/*AMDGPU_PG_SUPPORT_VCE | /*AMD_PG_SUPPORT_VCE |
AMDGPU_PG_SUPPORT_CP | AMD_PG_SUPPORT_CP |
AMDGPU_PG_SUPPORT_GDS | AMD_PG_SUPPORT_GDS |
AMDGPU_PG_SUPPORT_RLC_SMU_HS | AMD_PG_SUPPORT_RLC_SMU_HS |
AMDGPU_PG_SUPPORT_ACP | AMD_PG_SUPPORT_ACP |
AMDGPU_PG_SUPPORT_SAMU |*/ AMD_PG_SUPPORT_SAMU |*/
0; 0;
if (adev->pdev->device == 0x1312 || if (adev->pdev->device == 0x1312 ||
adev->pdev->device == 0x1316 || adev->pdev->device == 0x1316 ||
...@@ -2409,29 +2412,29 @@ static int cik_common_early_init(void *handle) ...@@ -2409,29 +2412,29 @@ static int cik_common_early_init(void *handle)
case CHIP_KABINI: case CHIP_KABINI:
case CHIP_MULLINS: case CHIP_MULLINS:
adev->cg_flags = adev->cg_flags =
AMDGPU_CG_SUPPORT_GFX_MGCG | AMD_CG_SUPPORT_GFX_MGCG |
AMDGPU_CG_SUPPORT_GFX_MGLS | AMD_CG_SUPPORT_GFX_MGLS |
/*AMDGPU_CG_SUPPORT_GFX_CGCG |*/ /*AMD_CG_SUPPORT_GFX_CGCG |*/
AMDGPU_CG_SUPPORT_GFX_CGLS | AMD_CG_SUPPORT_GFX_CGLS |
AMDGPU_CG_SUPPORT_GFX_CGTS | AMD_CG_SUPPORT_GFX_CGTS |
AMDGPU_CG_SUPPORT_GFX_CGTS_LS | AMD_CG_SUPPORT_GFX_CGTS_LS |
AMDGPU_CG_SUPPORT_GFX_CP_LS | AMD_CG_SUPPORT_GFX_CP_LS |
AMDGPU_CG_SUPPORT_SDMA_MGCG | AMD_CG_SUPPORT_SDMA_MGCG |
AMDGPU_CG_SUPPORT_SDMA_LS | AMD_CG_SUPPORT_SDMA_LS |
AMDGPU_CG_SUPPORT_BIF_LS | AMD_CG_SUPPORT_BIF_LS |
AMDGPU_CG_SUPPORT_VCE_MGCG | AMD_CG_SUPPORT_VCE_MGCG |
AMDGPU_CG_SUPPORT_UVD_MGCG | AMD_CG_SUPPORT_UVD_MGCG |
AMDGPU_CG_SUPPORT_HDP_LS | AMD_CG_SUPPORT_HDP_LS |
AMDGPU_CG_SUPPORT_HDP_MGCG; AMD_CG_SUPPORT_HDP_MGCG;
adev->pg_flags = adev->pg_flags =
/*AMDGPU_PG_SUPPORT_GFX_PG | /*AMD_PG_SUPPORT_GFX_PG |
AMDGPU_PG_SUPPORT_GFX_SMG | */ AMD_PG_SUPPORT_GFX_SMG | */
AMDGPU_PG_SUPPORT_UVD | AMD_PG_SUPPORT_UVD |
/*AMDGPU_PG_SUPPORT_VCE | /*AMD_PG_SUPPORT_VCE |
AMDGPU_PG_SUPPORT_CP | AMD_PG_SUPPORT_CP |
AMDGPU_PG_SUPPORT_GDS | AMD_PG_SUPPORT_GDS |
AMDGPU_PG_SUPPORT_RLC_SMU_HS | AMD_PG_SUPPORT_RLC_SMU_HS |
AMDGPU_PG_SUPPORT_SAMU |*/ AMD_PG_SUPPORT_SAMU |*/
0; 0;
if (adev->asic_type == CHIP_KABINI) { if (adev->asic_type == CHIP_KABINI) {
if (adev->rev_id == 0) if (adev->rev_id == 0)
......
...@@ -885,7 +885,7 @@ static void cik_enable_sdma_mgcg(struct amdgpu_device *adev, ...@@ -885,7 +885,7 @@ static void cik_enable_sdma_mgcg(struct amdgpu_device *adev,
{ {
u32 orig, data; u32 orig, data;
if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_SDMA_MGCG)) { if (enable && (adev->cg_flags & AMD_CG_SUPPORT_SDMA_MGCG)) {
WREG32(mmSDMA0_CLK_CTRL + SDMA0_REGISTER_OFFSET, 0x00000100); WREG32(mmSDMA0_CLK_CTRL + SDMA0_REGISTER_OFFSET, 0x00000100);
WREG32(mmSDMA0_CLK_CTRL + SDMA1_REGISTER_OFFSET, 0x00000100); WREG32(mmSDMA0_CLK_CTRL + SDMA1_REGISTER_OFFSET, 0x00000100);
} else { } else {
...@@ -906,7 +906,7 @@ static void cik_enable_sdma_mgls(struct amdgpu_device *adev, ...@@ -906,7 +906,7 @@ static void cik_enable_sdma_mgls(struct amdgpu_device *adev,
{ {
u32 orig, data; u32 orig, data;
if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_SDMA_LS)) { if (enable && (adev->cg_flags & AMD_CG_SUPPORT_SDMA_LS)) {
orig = data = RREG32(mmSDMA0_POWER_CNTL + SDMA0_REGISTER_OFFSET); orig = data = RREG32(mmSDMA0_POWER_CNTL + SDMA0_REGISTER_OFFSET);
data |= 0x100; data |= 0x100;
if (orig != data) if (orig != data)
......
...@@ -445,13 +445,13 @@ static int cz_dpm_init(struct amdgpu_device *adev) ...@@ -445,13 +445,13 @@ static int cz_dpm_init(struct amdgpu_device *adev)
pi->gfx_pg_threshold = 500; pi->gfx_pg_threshold = 500;
pi->caps_fps = true; pi->caps_fps = true;
/* uvd */ /* uvd */
pi->caps_uvd_pg = (adev->pg_flags & AMDGPU_PG_SUPPORT_UVD) ? true : false; pi->caps_uvd_pg = (adev->pg_flags & AMD_PG_SUPPORT_UVD) ? true : false;
pi->caps_uvd_dpm = true; pi->caps_uvd_dpm = true;
/* vce */ /* vce */
pi->caps_vce_pg = (adev->pg_flags & AMDGPU_PG_SUPPORT_VCE) ? true : false; pi->caps_vce_pg = (adev->pg_flags & AMD_PG_SUPPORT_VCE) ? true : false;
pi->caps_vce_dpm = true; pi->caps_vce_dpm = true;
/* acp */ /* acp */
pi->caps_acp_pg = (adev->pg_flags & AMDGPU_PG_SUPPORT_ACP) ? true : false; pi->caps_acp_pg = (adev->pg_flags & AMD_PG_SUPPORT_ACP) ? true : false;
pi->caps_acp_dpm = true; pi->caps_acp_dpm = true;
pi->caps_stable_power_state = false; pi->caps_stable_power_state = false;
......
...@@ -4109,7 +4109,7 @@ static void gfx_v7_0_enable_cgcg(struct amdgpu_device *adev, bool enable) ...@@ -4109,7 +4109,7 @@ static void gfx_v7_0_enable_cgcg(struct amdgpu_device *adev, bool enable)
orig = data = RREG32(mmRLC_CGCG_CGLS_CTRL); orig = data = RREG32(mmRLC_CGCG_CGLS_CTRL);
if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_GFX_CGCG)) { if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG)) {
gfx_v7_0_enable_gui_idle_interrupt(adev, true); gfx_v7_0_enable_gui_idle_interrupt(adev, true);
tmp = gfx_v7_0_halt_rlc(adev); tmp = gfx_v7_0_halt_rlc(adev);
...@@ -4147,9 +4147,9 @@ static void gfx_v7_0_enable_mgcg(struct amdgpu_device *adev, bool enable) ...@@ -4147,9 +4147,9 @@ static void gfx_v7_0_enable_mgcg(struct amdgpu_device *adev, bool enable)
{ {
u32 data, orig, tmp = 0; u32 data, orig, tmp = 0;
if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_GFX_MGCG)) { if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG)) {
if (adev->cg_flags & AMDGPU_CG_SUPPORT_GFX_MGLS) { if (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGLS) {
if (adev->cg_flags & AMDGPU_CG_SUPPORT_GFX_CP_LS) { if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CP_LS) {
orig = data = RREG32(mmCP_MEM_SLP_CNTL); orig = data = RREG32(mmCP_MEM_SLP_CNTL);
data |= CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK; data |= CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK;
if (orig != data) if (orig != data)
...@@ -4176,14 +4176,14 @@ static void gfx_v7_0_enable_mgcg(struct amdgpu_device *adev, bool enable) ...@@ -4176,14 +4176,14 @@ static void gfx_v7_0_enable_mgcg(struct amdgpu_device *adev, bool enable)
gfx_v7_0_update_rlc(adev, tmp); gfx_v7_0_update_rlc(adev, tmp);
if (adev->cg_flags & AMDGPU_CG_SUPPORT_GFX_CGTS) { if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGTS) {
orig = data = RREG32(mmCGTS_SM_CTRL_REG); orig = data = RREG32(mmCGTS_SM_CTRL_REG);
data &= ~CGTS_SM_CTRL_REG__SM_MODE_MASK; data &= ~CGTS_SM_CTRL_REG__SM_MODE_MASK;
data |= (0x2 << CGTS_SM_CTRL_REG__SM_MODE__SHIFT); data |= (0x2 << CGTS_SM_CTRL_REG__SM_MODE__SHIFT);
data |= CGTS_SM_CTRL_REG__SM_MODE_ENABLE_MASK; data |= CGTS_SM_CTRL_REG__SM_MODE_ENABLE_MASK;
data &= ~CGTS_SM_CTRL_REG__OVERRIDE_MASK; data &= ~CGTS_SM_CTRL_REG__OVERRIDE_MASK;
if ((adev->cg_flags & AMDGPU_CG_SUPPORT_GFX_MGLS) && if ((adev->cg_flags & AMD_CG_SUPPORT_GFX_MGLS) &&
(adev->cg_flags & AMDGPU_CG_SUPPORT_GFX_CGTS_LS)) (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGTS_LS))
data &= ~CGTS_SM_CTRL_REG__LS_OVERRIDE_MASK; data &= ~CGTS_SM_CTRL_REG__LS_OVERRIDE_MASK;
data &= ~CGTS_SM_CTRL_REG__ON_MONITOR_ADD_MASK; data &= ~CGTS_SM_CTRL_REG__ON_MONITOR_ADD_MASK;
data |= CGTS_SM_CTRL_REG__ON_MONITOR_ADD_EN_MASK; data |= CGTS_SM_CTRL_REG__ON_MONITOR_ADD_EN_MASK;
...@@ -4249,7 +4249,7 @@ static void gfx_v7_0_enable_sclk_slowdown_on_pu(struct amdgpu_device *adev, ...@@ -4249,7 +4249,7 @@ static void gfx_v7_0_enable_sclk_slowdown_on_pu(struct amdgpu_device *adev,
u32 data, orig; u32 data, orig;
orig = data = RREG32(mmRLC_PG_CNTL); orig = data = RREG32(mmRLC_PG_CNTL);
if (enable && (adev->pg_flags & AMDGPU_PG_SUPPORT_RLC_SMU_HS)) if (enable && (adev->pg_flags & AMD_PG_SUPPORT_RLC_SMU_HS))
data |= RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PU_ENABLE_MASK; data |= RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PU_ENABLE_MASK;
else else
data &= ~RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PU_ENABLE_MASK; data &= ~RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PU_ENABLE_MASK;
...@@ -4263,7 +4263,7 @@ static void gfx_v7_0_enable_sclk_slowdown_on_pd(struct amdgpu_device *adev, ...@@ -4263,7 +4263,7 @@ static void gfx_v7_0_enable_sclk_slowdown_on_pd(struct amdgpu_device *adev,
u32 data, orig; u32 data, orig;
orig = data = RREG32(mmRLC_PG_CNTL); orig = data = RREG32(mmRLC_PG_CNTL);
if (enable && (adev->pg_flags & AMDGPU_PG_SUPPORT_RLC_SMU_HS)) if (enable && (adev->pg_flags & AMD_PG_SUPPORT_RLC_SMU_HS))
data |= RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PD_ENABLE_MASK; data |= RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PD_ENABLE_MASK;
else else
data &= ~RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PD_ENABLE_MASK; data &= ~RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PD_ENABLE_MASK;
...@@ -4276,7 +4276,7 @@ static void gfx_v7_0_enable_cp_pg(struct amdgpu_device *adev, bool enable) ...@@ -4276,7 +4276,7 @@ static void gfx_v7_0_enable_cp_pg(struct amdgpu_device *adev, bool enable)
u32 data, orig; u32 data, orig;
orig = data = RREG32(mmRLC_PG_CNTL); orig = data = RREG32(mmRLC_PG_CNTL);
if (enable && (adev->pg_flags & AMDGPU_PG_SUPPORT_CP)) if (enable && (adev->pg_flags & AMD_PG_SUPPORT_CP))
data &= ~0x8000; data &= ~0x8000;
else else
data |= 0x8000; data |= 0x8000;
...@@ -4289,7 +4289,7 @@ static void gfx_v7_0_enable_gds_pg(struct amdgpu_device *adev, bool enable) ...@@ -4289,7 +4289,7 @@ static void gfx_v7_0_enable_gds_pg(struct amdgpu_device *adev, bool enable)
u32 data, orig; u32 data, orig;
orig = data = RREG32(mmRLC_PG_CNTL); orig = data = RREG32(mmRLC_PG_CNTL);
if (enable && (adev->pg_flags & AMDGPU_PG_SUPPORT_GDS)) if (enable && (adev->pg_flags & AMD_PG_SUPPORT_GDS))
data &= ~0x2000; data &= ~0x2000;
else else
data |= 0x2000; data |= 0x2000;
...@@ -4370,7 +4370,7 @@ static void gfx_v7_0_enable_gfx_cgpg(struct amdgpu_device *adev, ...@@ -4370,7 +4370,7 @@ static void gfx_v7_0_enable_gfx_cgpg(struct amdgpu_device *adev,
{ {
u32 data, orig; u32 data, orig;
if (enable && (adev->pg_flags & AMDGPU_PG_SUPPORT_GFX_PG)) { if (enable && (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG)) {
orig = data = RREG32(mmRLC_PG_CNTL); orig = data = RREG32(mmRLC_PG_CNTL);
data |= RLC_PG_CNTL__GFX_POWER_GATING_ENABLE_MASK; data |= RLC_PG_CNTL__GFX_POWER_GATING_ENABLE_MASK;
if (orig != data) if (orig != data)
...@@ -4442,7 +4442,7 @@ static void gfx_v7_0_enable_gfx_static_mgpg(struct amdgpu_device *adev, ...@@ -4442,7 +4442,7 @@ static void gfx_v7_0_enable_gfx_static_mgpg(struct amdgpu_device *adev,
u32 data, orig; u32 data, orig;
orig = data = RREG32(mmRLC_PG_CNTL); orig = data = RREG32(mmRLC_PG_CNTL);
if (enable && (adev->pg_flags & AMDGPU_PG_SUPPORT_GFX_SMG)) if (enable && (adev->pg_flags & AMD_PG_SUPPORT_GFX_SMG))
data |= RLC_PG_CNTL__STATIC_PER_CU_PG_ENABLE_MASK; data |= RLC_PG_CNTL__STATIC_PER_CU_PG_ENABLE_MASK;
else else
data &= ~RLC_PG_CNTL__STATIC_PER_CU_PG_ENABLE_MASK; data &= ~RLC_PG_CNTL__STATIC_PER_CU_PG_ENABLE_MASK;
...@@ -4456,7 +4456,7 @@ static void gfx_v7_0_enable_gfx_dynamic_mgpg(struct amdgpu_device *adev, ...@@ -4456,7 +4456,7 @@ static void gfx_v7_0_enable_gfx_dynamic_mgpg(struct amdgpu_device *adev,
u32 data, orig; u32 data, orig;
orig = data = RREG32(mmRLC_PG_CNTL); orig = data = RREG32(mmRLC_PG_CNTL);
if (enable && (adev->pg_flags & AMDGPU_PG_SUPPORT_GFX_DMG)) if (enable && (adev->pg_flags & AMD_PG_SUPPORT_GFX_DMG))
data |= RLC_PG_CNTL__DYN_PER_CU_PG_ENABLE_MASK; data |= RLC_PG_CNTL__DYN_PER_CU_PG_ENABLE_MASK;
else else
data &= ~RLC_PG_CNTL__DYN_PER_CU_PG_ENABLE_MASK; data &= ~RLC_PG_CNTL__DYN_PER_CU_PG_ENABLE_MASK;
...@@ -4623,15 +4623,15 @@ static void gfx_v7_0_get_csb_buffer(struct amdgpu_device *adev, ...@@ -4623,15 +4623,15 @@ static void gfx_v7_0_get_csb_buffer(struct amdgpu_device *adev,
static void gfx_v7_0_init_pg(struct amdgpu_device *adev) static void gfx_v7_0_init_pg(struct amdgpu_device *adev)
{ {
if (adev->pg_flags & (AMDGPU_PG_SUPPORT_GFX_PG | if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_PG |
AMDGPU_PG_SUPPORT_GFX_SMG | AMD_PG_SUPPORT_GFX_SMG |
AMDGPU_PG_SUPPORT_GFX_DMG | AMD_PG_SUPPORT_GFX_DMG |
AMDGPU_PG_SUPPORT_CP | AMD_PG_SUPPORT_CP |
AMDGPU_PG_SUPPORT_GDS | AMD_PG_SUPPORT_GDS |
AMDGPU_PG_SUPPORT_RLC_SMU_HS)) { AMD_PG_SUPPORT_RLC_SMU_HS)) {
gfx_v7_0_enable_sclk_slowdown_on_pu(adev, true); gfx_v7_0_enable_sclk_slowdown_on_pu(adev, true);
gfx_v7_0_enable_sclk_slowdown_on_pd(adev, true); gfx_v7_0_enable_sclk_slowdown_on_pd(adev, true);
if (adev->pg_flags & AMDGPU_PG_SUPPORT_GFX_PG) { if (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG) {
gfx_v7_0_init_gfx_cgpg(adev); gfx_v7_0_init_gfx_cgpg(adev);
gfx_v7_0_enable_cp_pg(adev, true); gfx_v7_0_enable_cp_pg(adev, true);
gfx_v7_0_enable_gds_pg(adev, true); gfx_v7_0_enable_gds_pg(adev, true);
...@@ -4643,14 +4643,14 @@ static void gfx_v7_0_init_pg(struct amdgpu_device *adev) ...@@ -4643,14 +4643,14 @@ static void gfx_v7_0_init_pg(struct amdgpu_device *adev)
static void gfx_v7_0_fini_pg(struct amdgpu_device *adev) static void gfx_v7_0_fini_pg(struct amdgpu_device *adev)
{ {
if (adev->pg_flags & (AMDGPU_PG_SUPPORT_GFX_PG | if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_PG |
AMDGPU_PG_SUPPORT_GFX_SMG | AMD_PG_SUPPORT_GFX_SMG |
AMDGPU_PG_SUPPORT_GFX_DMG | AMD_PG_SUPPORT_GFX_DMG |
AMDGPU_PG_SUPPORT_CP | AMD_PG_SUPPORT_CP |
AMDGPU_PG_SUPPORT_GDS | AMD_PG_SUPPORT_GDS |
AMDGPU_PG_SUPPORT_RLC_SMU_HS)) { AMD_PG_SUPPORT_RLC_SMU_HS)) {
gfx_v7_0_update_gfx_pg(adev, false); gfx_v7_0_update_gfx_pg(adev, false);
if (adev->pg_flags & AMDGPU_PG_SUPPORT_GFX_PG) { if (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG) {
gfx_v7_0_enable_cp_pg(adev, false); gfx_v7_0_enable_cp_pg(adev, false);
gfx_v7_0_enable_gds_pg(adev, false); gfx_v7_0_enable_gds_pg(adev, false);
} }
...@@ -5527,14 +5527,14 @@ static int gfx_v7_0_set_powergating_state(void *handle, ...@@ -5527,14 +5527,14 @@ static int gfx_v7_0_set_powergating_state(void *handle,
if (state == AMD_PG_STATE_GATE) if (state == AMD_PG_STATE_GATE)
gate = true; gate = true;
if (adev->pg_flags & (AMDGPU_PG_SUPPORT_GFX_PG | if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_PG |
AMDGPU_PG_SUPPORT_GFX_SMG | AMD_PG_SUPPORT_GFX_SMG |
AMDGPU_PG_SUPPORT_GFX_DMG | AMD_PG_SUPPORT_GFX_DMG |
AMDGPU_PG_SUPPORT_CP | AMD_PG_SUPPORT_CP |
AMDGPU_PG_SUPPORT_GDS | AMD_PG_SUPPORT_GDS |
AMDGPU_PG_SUPPORT_RLC_SMU_HS)) { AMD_PG_SUPPORT_RLC_SMU_HS)) {
gfx_v7_0_update_gfx_pg(adev, gate); gfx_v7_0_update_gfx_pg(adev, gate);
if (adev->pg_flags & AMDGPU_PG_SUPPORT_GFX_PG) { if (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG) {
gfx_v7_0_enable_cp_pg(adev, gate); gfx_v7_0_enable_cp_pg(adev, gate);
gfx_v7_0_enable_gds_pg(adev, gate); gfx_v7_0_enable_gds_pg(adev, gate);
} }
......
...@@ -792,7 +792,7 @@ static void gmc_v7_0_enable_mc_ls(struct amdgpu_device *adev, ...@@ -792,7 +792,7 @@ static void gmc_v7_0_enable_mc_ls(struct amdgpu_device *adev,
for (i = 0; i < ARRAY_SIZE(mc_cg_registers); i++) { for (i = 0; i < ARRAY_SIZE(mc_cg_registers); i++) {
orig = data = RREG32(mc_cg_registers[i]); orig = data = RREG32(mc_cg_registers[i]);
if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_MC_LS)) if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_LS))
data |= mc_cg_ls_en[i]; data |= mc_cg_ls_en[i];
else else
data &= ~mc_cg_ls_en[i]; data &= ~mc_cg_ls_en[i];
...@@ -809,7 +809,7 @@ static void gmc_v7_0_enable_mc_mgcg(struct amdgpu_device *adev, ...@@ -809,7 +809,7 @@ static void gmc_v7_0_enable_mc_mgcg(struct amdgpu_device *adev,
for (i = 0; i < ARRAY_SIZE(mc_cg_registers); i++) { for (i = 0; i < ARRAY_SIZE(mc_cg_registers); i++) {
orig = data = RREG32(mc_cg_registers[i]); orig = data = RREG32(mc_cg_registers[i]);
if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_MC_MGCG)) if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_MGCG))
data |= mc_cg_en[i]; data |= mc_cg_en[i];
else else
data &= ~mc_cg_en[i]; data &= ~mc_cg_en[i];
...@@ -825,7 +825,7 @@ static void gmc_v7_0_enable_bif_mgls(struct amdgpu_device *adev, ...@@ -825,7 +825,7 @@ static void gmc_v7_0_enable_bif_mgls(struct amdgpu_device *adev,
orig = data = RREG32_PCIE(ixPCIE_CNTL2); orig = data = RREG32_PCIE(ixPCIE_CNTL2);
if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_BIF_LS)) { if (enable && (adev->cg_flags & AMD_CG_SUPPORT_BIF_LS)) {
data = REG_SET_FIELD(data, PCIE_CNTL2, SLV_MEM_LS_EN, 1); data = REG_SET_FIELD(data, PCIE_CNTL2, SLV_MEM_LS_EN, 1);
data = REG_SET_FIELD(data, PCIE_CNTL2, MST_MEM_LS_EN, 1); data = REG_SET_FIELD(data, PCIE_CNTL2, MST_MEM_LS_EN, 1);
data = REG_SET_FIELD(data, PCIE_CNTL2, REPLAY_MEM_LS_EN, 1); data = REG_SET_FIELD(data, PCIE_CNTL2, REPLAY_MEM_LS_EN, 1);
...@@ -848,7 +848,7 @@ static void gmc_v7_0_enable_hdp_mgcg(struct amdgpu_device *adev, ...@@ -848,7 +848,7 @@ static void gmc_v7_0_enable_hdp_mgcg(struct amdgpu_device *adev,
orig = data = RREG32(mmHDP_HOST_PATH_CNTL); orig = data = RREG32(mmHDP_HOST_PATH_CNTL);
if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_HDP_MGCG)) if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_MGCG))
data = REG_SET_FIELD(data, HDP_HOST_PATH_CNTL, CLOCK_GATING_DIS, 0); data = REG_SET_FIELD(data, HDP_HOST_PATH_CNTL, CLOCK_GATING_DIS, 0);
else else
data = REG_SET_FIELD(data, HDP_HOST_PATH_CNTL, CLOCK_GATING_DIS, 1); data = REG_SET_FIELD(data, HDP_HOST_PATH_CNTL, CLOCK_GATING_DIS, 1);
...@@ -864,7 +864,7 @@ static void gmc_v7_0_enable_hdp_ls(struct amdgpu_device *adev, ...@@ -864,7 +864,7 @@ static void gmc_v7_0_enable_hdp_ls(struct amdgpu_device *adev,
orig = data = RREG32(mmHDP_MEM_POWER_LS); orig = data = RREG32(mmHDP_MEM_POWER_LS);
if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_HDP_LS)) if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS))
data = REG_SET_FIELD(data, HDP_MEM_POWER_LS, LS_ENABLE, 1); data = REG_SET_FIELD(data, HDP_MEM_POWER_LS, LS_ENABLE, 1);
else else
data = REG_SET_FIELD(data, HDP_MEM_POWER_LS, LS_ENABLE, 0); data = REG_SET_FIELD(data, HDP_MEM_POWER_LS, LS_ENABLE, 0);
......
...@@ -2859,11 +2859,11 @@ static int kv_dpm_init(struct amdgpu_device *adev) ...@@ -2859,11 +2859,11 @@ static int kv_dpm_init(struct amdgpu_device *adev)
pi->voltage_drop_t = 0; pi->voltage_drop_t = 0;
pi->caps_sclk_throttle_low_notification = false; pi->caps_sclk_throttle_low_notification = false;
pi->caps_fps = false; /* true? */ pi->caps_fps = false; /* true? */
pi->caps_uvd_pg = (adev->pg_flags & AMDGPU_PG_SUPPORT_UVD) ? true : false; pi->caps_uvd_pg = (adev->pg_flags & AMD_PG_SUPPORT_UVD) ? true : false;
pi->caps_uvd_dpm = true; pi->caps_uvd_dpm = true;
pi->caps_vce_pg = (adev->pg_flags & AMDGPU_PG_SUPPORT_VCE) ? true : false; pi->caps_vce_pg = (adev->pg_flags & AMD_PG_SUPPORT_VCE) ? true : false;
pi->caps_samu_pg = (adev->pg_flags & AMDGPU_PG_SUPPORT_SAMU) ? true : false; pi->caps_samu_pg = (adev->pg_flags & AMD_PG_SUPPORT_SAMU) ? true : false;
pi->caps_acp_pg = (adev->pg_flags & AMDGPU_PG_SUPPORT_ACP) ? true : false; pi->caps_acp_pg = (adev->pg_flags & AMD_PG_SUPPORT_ACP) ? true : false;
pi->caps_stable_p_state = false; pi->caps_stable_p_state = false;
ret = kv_parse_sys_info_table(adev); ret = kv_parse_sys_info_table(adev);
......
...@@ -611,7 +611,7 @@ static void uvd_v4_2_enable_mgcg(struct amdgpu_device *adev, ...@@ -611,7 +611,7 @@ static void uvd_v4_2_enable_mgcg(struct amdgpu_device *adev,
{ {
u32 orig, data; u32 orig, data;
if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_UVD_MGCG)) { if (enable && (adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG)) {
data = RREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL); data = RREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL);
data = 0xfff; data = 0xfff;
WREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL, data); WREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL, data);
...@@ -830,6 +830,9 @@ static int uvd_v4_2_set_clockgating_state(void *handle, ...@@ -830,6 +830,9 @@ static int uvd_v4_2_set_clockgating_state(void *handle,
bool gate = false; bool gate = false;
struct amdgpu_device *adev = (struct amdgpu_device *)handle; struct amdgpu_device *adev = (struct amdgpu_device *)handle;
if (!(adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG))
return 0;
if (state == AMD_CG_STATE_GATE) if (state == AMD_CG_STATE_GATE)
gate = true; gate = true;
...@@ -848,7 +851,10 @@ static int uvd_v4_2_set_powergating_state(void *handle, ...@@ -848,7 +851,10 @@ static int uvd_v4_2_set_powergating_state(void *handle,
* revisit this when there is a cleaner line between * revisit this when there is a cleaner line between
* the smc and the hw blocks * the smc and the hw blocks
*/ */
struct amdgpu_device *adev = (struct amdgpu_device *)handle; struct amdgpu_device *adev = (struct amdgpu_device *)handle;
if (!(adev->pg_flags & AMD_PG_SUPPORT_UVD))
return 0;
if (state == AMD_PG_STATE_GATE) { if (state == AMD_PG_STATE_GATE) {
uvd_v4_2_stop(adev); uvd_v4_2_stop(adev);
......
...@@ -774,6 +774,11 @@ static int uvd_v5_0_process_interrupt(struct amdgpu_device *adev, ...@@ -774,6 +774,11 @@ static int uvd_v5_0_process_interrupt(struct amdgpu_device *adev,
static int uvd_v5_0_set_clockgating_state(void *handle, static int uvd_v5_0_set_clockgating_state(void *handle,
enum amd_clockgating_state state) enum amd_clockgating_state state)
{ {
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
if (!(adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG))
return 0;
return 0; return 0;
} }
...@@ -789,6 +794,9 @@ static int uvd_v5_0_set_powergating_state(void *handle, ...@@ -789,6 +794,9 @@ static int uvd_v5_0_set_powergating_state(void *handle,
*/ */
struct amdgpu_device *adev = (struct amdgpu_device *)handle; struct amdgpu_device *adev = (struct amdgpu_device *)handle;
if (!(adev->pg_flags & AMD_PG_SUPPORT_UVD))
return 0;
if (state == AMD_PG_STATE_GATE) { if (state == AMD_PG_STATE_GATE) {
uvd_v5_0_stop(adev); uvd_v5_0_stop(adev);
return 0; return 0;
......
...@@ -532,7 +532,7 @@ static int uvd_v6_0_start(struct amdgpu_device *adev) ...@@ -532,7 +532,7 @@ static int uvd_v6_0_start(struct amdgpu_device *adev)
uvd_v6_0_mc_resume(adev); uvd_v6_0_mc_resume(adev);
/* Set dynamic clock gating in S/W control mode */ /* Set dynamic clock gating in S/W control mode */
if (adev->cg_flags & AMDGPU_CG_SUPPORT_UVD_MGCG) { if (adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG) {
if (adev->flags & AMD_IS_APU) if (adev->flags & AMD_IS_APU)
cz_set_uvd_clock_gating_branches(adev, false); cz_set_uvd_clock_gating_branches(adev, false);
else else
...@@ -1000,7 +1000,7 @@ static int uvd_v6_0_set_clockgating_state(void *handle, ...@@ -1000,7 +1000,7 @@ static int uvd_v6_0_set_clockgating_state(void *handle,
struct amdgpu_device *adev = (struct amdgpu_device *)handle; struct amdgpu_device *adev = (struct amdgpu_device *)handle;
bool enable = (state == AMD_CG_STATE_GATE) ? true : false; bool enable = (state == AMD_CG_STATE_GATE) ? true : false;
if (!(adev->cg_flags & AMDGPU_CG_SUPPORT_UVD_MGCG)) if (!(adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG))
return 0; return 0;
if (enable) { if (enable) {
...@@ -1030,6 +1030,9 @@ static int uvd_v6_0_set_powergating_state(void *handle, ...@@ -1030,6 +1030,9 @@ static int uvd_v6_0_set_powergating_state(void *handle,
*/ */
struct amdgpu_device *adev = (struct amdgpu_device *)handle; struct amdgpu_device *adev = (struct amdgpu_device *)handle;
if (!(adev->pg_flags & AMD_PG_SUPPORT_UVD))
return 0;
if (state == AMD_PG_STATE_GATE) { if (state == AMD_PG_STATE_GATE) {
uvd_v6_0_stop(adev); uvd_v6_0_stop(adev);
return 0; return 0;
......
...@@ -373,7 +373,7 @@ static void vce_v2_0_enable_mgcg(struct amdgpu_device *adev, bool enable) ...@@ -373,7 +373,7 @@ static void vce_v2_0_enable_mgcg(struct amdgpu_device *adev, bool enable)
{ {
bool sw_cg = false; bool sw_cg = false;
if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_VCE_MGCG)) { if (enable && (adev->cg_flags & AMD_CG_SUPPORT_VCE_MGCG)) {
if (sw_cg) if (sw_cg)
vce_v2_0_set_sw_cg(adev, true); vce_v2_0_set_sw_cg(adev, true);
else else
...@@ -608,6 +608,9 @@ static int vce_v2_0_set_powergating_state(void *handle, ...@@ -608,6 +608,9 @@ static int vce_v2_0_set_powergating_state(void *handle,
*/ */
struct amdgpu_device *adev = (struct amdgpu_device *)handle; struct amdgpu_device *adev = (struct amdgpu_device *)handle;
if (!(adev->pg_flags & AMD_PG_SUPPORT_VCE))
return 0;
if (state == AMD_PG_STATE_GATE) if (state == AMD_PG_STATE_GATE)
/* XXX do we need a vce_v2_0_stop()? */ /* XXX do we need a vce_v2_0_stop()? */
return 0; return 0;
......
...@@ -277,7 +277,7 @@ static int vce_v3_0_start(struct amdgpu_device *adev) ...@@ -277,7 +277,7 @@ static int vce_v3_0_start(struct amdgpu_device *adev)
WREG32_P(mmVCE_STATUS, 0, ~1); WREG32_P(mmVCE_STATUS, 0, ~1);
/* Set Clock-Gating off */ /* Set Clock-Gating off */
if (adev->cg_flags & AMDGPU_CG_SUPPORT_VCE_MGCG) if (adev->cg_flags & AMD_CG_SUPPORT_VCE_MGCG)
vce_v3_0_set_vce_sw_clock_gating(adev, false); vce_v3_0_set_vce_sw_clock_gating(adev, false);
if (r) { if (r) {
...@@ -676,7 +676,7 @@ static int vce_v3_0_set_clockgating_state(void *handle, ...@@ -676,7 +676,7 @@ static int vce_v3_0_set_clockgating_state(void *handle,
bool enable = (state == AMD_CG_STATE_GATE) ? true : false; bool enable = (state == AMD_CG_STATE_GATE) ? true : false;
int i; int i;
if (!(adev->cg_flags & AMDGPU_CG_SUPPORT_VCE_MGCG)) if (!(adev->cg_flags & AMD_CG_SUPPORT_VCE_MGCG))
return 0; return 0;
mutex_lock(&adev->grbm_idx_mutex); mutex_lock(&adev->grbm_idx_mutex);
...@@ -728,6 +728,9 @@ static int vce_v3_0_set_powergating_state(void *handle, ...@@ -728,6 +728,9 @@ static int vce_v3_0_set_powergating_state(void *handle,
*/ */
struct amdgpu_device *adev = (struct amdgpu_device *)handle; struct amdgpu_device *adev = (struct amdgpu_device *)handle;
if (!(adev->pg_flags & AMD_PG_SUPPORT_VCE))
return 0;
if (state == AMD_PG_STATE_GATE) if (state == AMD_PG_STATE_GATE)
/* XXX do we need a vce_v3_0_stop()? */ /* XXX do we need a vce_v3_0_stop()? */
return 0; return 0;
......
...@@ -1457,8 +1457,7 @@ static int vi_common_early_init(void *handle) ...@@ -1457,8 +1457,7 @@ static int vi_common_early_init(void *handle)
case CHIP_STONEY: case CHIP_STONEY:
adev->has_uvd = true; adev->has_uvd = true;
adev->cg_flags = 0; adev->cg_flags = 0;
/* Disable UVD pg */ adev->pg_flags = 0;
adev->pg_flags = /* AMDGPU_PG_SUPPORT_UVD | */AMDGPU_PG_SUPPORT_VCE;
adev->external_rev_id = adev->rev_id + 0x1; adev->external_rev_id = adev->rev_id + 0x1;
break; break;
default: default:
......
...@@ -85,6 +85,38 @@ enum amd_powergating_state { ...@@ -85,6 +85,38 @@ enum amd_powergating_state {
AMD_PG_STATE_UNGATE, AMD_PG_STATE_UNGATE,
}; };
/* CG flags */
#define AMD_CG_SUPPORT_GFX_MGCG (1 << 0)
#define AMD_CG_SUPPORT_GFX_MGLS (1 << 1)
#define AMD_CG_SUPPORT_GFX_CGCG (1 << 2)
#define AMD_CG_SUPPORT_GFX_CGLS (1 << 3)
#define AMD_CG_SUPPORT_GFX_CGTS (1 << 4)
#define AMD_CG_SUPPORT_GFX_CGTS_LS (1 << 5)
#define AMD_CG_SUPPORT_GFX_CP_LS (1 << 6)
#define AMD_CG_SUPPORT_GFX_RLC_LS (1 << 7)
#define AMD_CG_SUPPORT_MC_LS (1 << 8)
#define AMD_CG_SUPPORT_MC_MGCG (1 << 9)
#define AMD_CG_SUPPORT_SDMA_LS (1 << 10)
#define AMD_CG_SUPPORT_SDMA_MGCG (1 << 11)
#define AMD_CG_SUPPORT_BIF_LS (1 << 12)
#define AMD_CG_SUPPORT_UVD_MGCG (1 << 13)
#define AMD_CG_SUPPORT_VCE_MGCG (1 << 14)
#define AMD_CG_SUPPORT_HDP_LS (1 << 15)
#define AMD_CG_SUPPORT_HDP_MGCG (1 << 16)
/* PG flags */
#define AMD_PG_SUPPORT_GFX_PG (1 << 0)
#define AMD_PG_SUPPORT_GFX_SMG (1 << 1)
#define AMD_PG_SUPPORT_GFX_DMG (1 << 2)
#define AMD_PG_SUPPORT_UVD (1 << 3)
#define AMD_PG_SUPPORT_VCE (1 << 4)
#define AMD_PG_SUPPORT_CP (1 << 5)
#define AMD_PG_SUPPORT_GDS (1 << 6)
#define AMD_PG_SUPPORT_RLC_SMU_HS (1 << 7)
#define AMD_PG_SUPPORT_SDMA (1 << 8)
#define AMD_PG_SUPPORT_ACP (1 << 9)
#define AMD_PG_SUPPORT_SAMU (1 << 10)
enum amd_pm_state_type { enum amd_pm_state_type {
/* not used for dpm */ /* not used for dpm */
POWER_STATE_TYPE_DEFAULT, POWER_STATE_TYPE_DEFAULT,
......
...@@ -109,6 +109,8 @@ enum cgs_system_info_id { ...@@ -109,6 +109,8 @@ enum cgs_system_info_id {
CGS_SYSTEM_INFO_ADAPTER_BDF_ID = 1, CGS_SYSTEM_INFO_ADAPTER_BDF_ID = 1,
CGS_SYSTEM_INFO_PCIE_GEN_INFO, CGS_SYSTEM_INFO_PCIE_GEN_INFO,
CGS_SYSTEM_INFO_PCIE_MLW, CGS_SYSTEM_INFO_PCIE_MLW,
CGS_SYSTEM_INFO_CG_FLAGS,
CGS_SYSTEM_INFO_PG_FLAGS,
CGS_SYSTEM_INFO_ID_MAXIMUM, CGS_SYSTEM_INFO_ID_MAXIMUM,
}; };
......
...@@ -174,6 +174,8 @@ static int cz_initialize_dpm_defaults(struct pp_hwmgr *hwmgr) ...@@ -174,6 +174,8 @@ static int cz_initialize_dpm_defaults(struct pp_hwmgr *hwmgr)
{ {
struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend); struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
uint32_t i; uint32_t i;
struct cgs_system_info sys_info = {0};
int result;
cz_hwmgr->gfx_ramp_step = 256*25/100; cz_hwmgr->gfx_ramp_step = 256*25/100;
...@@ -247,6 +249,22 @@ static int cz_initialize_dpm_defaults(struct pp_hwmgr *hwmgr) ...@@ -247,6 +249,22 @@ static int cz_initialize_dpm_defaults(struct pp_hwmgr *hwmgr)
phm_cap_set(hwmgr->platform_descriptor.platformCaps, phm_cap_set(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_DisableVoltageIsland); PHM_PlatformCaps_DisableVoltageIsland);
phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_UVDPowerGating);
phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_VCEPowerGating);
sys_info.size = sizeof(struct cgs_system_info);
sys_info.info_id = CGS_SYSTEM_INFO_PG_FLAGS;
result = cgs_query_system_info(hwmgr->device, &sys_info);
if (!result) {
if (sys_info.value & AMD_PG_SUPPORT_UVD)
phm_cap_set(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_UVDPowerGating);
if (sys_info.value & AMD_PG_SUPPORT_VCE)
phm_cap_set(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_VCEPowerGating);
}
return 0; return 0;
} }
......
...@@ -4451,6 +4451,7 @@ int tonga_hwmgr_backend_init(struct pp_hwmgr *hwmgr) ...@@ -4451,6 +4451,7 @@ int tonga_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
pp_atomctrl_gpio_pin_assignment gpio_pin_assignment; pp_atomctrl_gpio_pin_assignment gpio_pin_assignment;
struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable); struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
phw_tonga_ulv_parm *ulv; phw_tonga_ulv_parm *ulv;
struct cgs_system_info sys_info = {0};
PP_ASSERT_WITH_CODE((NULL != hwmgr), PP_ASSERT_WITH_CODE((NULL != hwmgr),
"Invalid Parameter!", return -1;); "Invalid Parameter!", return -1;);
...@@ -4615,9 +4616,23 @@ int tonga_hwmgr_backend_init(struct pp_hwmgr *hwmgr) ...@@ -4615,9 +4616,23 @@ int tonga_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
data->vddc_phase_shed_control = 0; data->vddc_phase_shed_control = 0;
if (0 == result) { phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
struct cgs_system_info sys_info = {0}; PHM_PlatformCaps_UVDPowerGating);
phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_VCEPowerGating);
sys_info.size = sizeof(struct cgs_system_info);
sys_info.info_id = CGS_SYSTEM_INFO_PG_FLAGS;
result = cgs_query_system_info(hwmgr->device, &sys_info);
if (!result) {
if (sys_info.value & AMD_PG_SUPPORT_UVD)
phm_cap_set(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_UVDPowerGating);
if (sys_info.value & AMD_PG_SUPPORT_VCE)
phm_cap_set(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_VCEPowerGating);
}
if (0 == result) {
data->is_tlu_enabled = 0; data->is_tlu_enabled = 0;
hwmgr->platform_descriptor.hardwareActivityPerformanceLevels = hwmgr->platform_descriptor.hardwareActivityPerformanceLevels =
TONGA_MAX_HARDWARE_POWERLEVELS; TONGA_MAX_HARDWARE_POWERLEVELS;
......
...@@ -349,8 +349,13 @@ int radeon_sa_bo_new(struct radeon_device *rdev, ...@@ -349,8 +349,13 @@ int radeon_sa_bo_new(struct radeon_device *rdev,
/* see if we can skip over some allocations */ /* see if we can skip over some allocations */
} while (radeon_sa_bo_next_hole(sa_manager, fences, tries)); } while (radeon_sa_bo_next_hole(sa_manager, fences, tries));
for (i = 0; i < RADEON_NUM_RINGS; ++i)
radeon_fence_ref(fences[i]);
spin_unlock(&sa_manager->wq.lock); spin_unlock(&sa_manager->wq.lock);
r = radeon_fence_wait_any(rdev, fences, false); r = radeon_fence_wait_any(rdev, fences, false);
for (i = 0; i < RADEON_NUM_RINGS; ++i)
radeon_fence_unref(&fences[i]);
spin_lock(&sa_manager->wq.lock); spin_lock(&sa_manager->wq.lock);
/* if we have nothing to wait for block */ /* if we have nothing to wait for block */
if (r == -ENOENT) { if (r == -ENOENT) {
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment