Commit 6f906814 authored by Tom St Denis, committed by Alex Deucher

drm/amd/amdgpu: Fix VCE CG order and resume defaults

CG was being enabled in reverse sense from dpm/powerplay.
Also fix the default CLK_EN signal to enable all of the blocks.
Signed-off-by: Tom St Denis <tom.stdenis@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent f16fe6d3
...@@ -130,40 +130,35 @@ static void vce_v3_0_set_vce_sw_clock_gating(struct amdgpu_device *adev, ...@@ -130,40 +130,35 @@ static void vce_v3_0_set_vce_sw_clock_gating(struct amdgpu_device *adev,
/* Set Override to disable Clock Gating */ /* Set Override to disable Clock Gating */
vce_v3_0_override_vce_clock_gating(adev, true); vce_v3_0_override_vce_clock_gating(adev, true);
if (!gated) { /* This function enables MGCG which is controlled by firmware.
/* Force CLOCK ON for VCE_CLOCK_GATING_B, With the clocks in the gated state the core is still
* {*_FORCE_ON, *_FORCE_OFF} = {1, 0} accessible but the firmware will throttle the clocks on the
* VREG can be FORCE ON or set to Dynamic, but can't be OFF fly as necessary.
*/ */
if (gated) {
tmp = data = RREG32(mmVCE_CLOCK_GATING_B); tmp = data = RREG32(mmVCE_CLOCK_GATING_B);
data |= 0x1ff; data |= 0x1ff;
data &= ~0xef0000; data &= ~0xef0000;
if (tmp != data) if (tmp != data)
WREG32(mmVCE_CLOCK_GATING_B, data); WREG32(mmVCE_CLOCK_GATING_B, data);
/* Force CLOCK ON for VCE_UENC_CLOCK_GATING,
* {*_FORCE_ON, *_FORCE_OFF} = {1, 0}
*/
tmp = data = RREG32(mmVCE_UENC_CLOCK_GATING); tmp = data = RREG32(mmVCE_UENC_CLOCK_GATING);
data |= 0x3ff000; data |= 0x3ff000;
data &= ~0xffc00000; data &= ~0xffc00000;
if (tmp != data) if (tmp != data)
WREG32(mmVCE_UENC_CLOCK_GATING, data); WREG32(mmVCE_UENC_CLOCK_GATING, data);
/* set VCE_UENC_CLOCK_GATING_2 */
tmp = data = RREG32(mmVCE_UENC_CLOCK_GATING_2); tmp = data = RREG32(mmVCE_UENC_CLOCK_GATING_2);
data |= 0x2; data |= 0x2;
data &= ~0x2; data &= ~0x00010000;
if (tmp != data) if (tmp != data)
WREG32(mmVCE_UENC_CLOCK_GATING_2, data); WREG32(mmVCE_UENC_CLOCK_GATING_2, data);
/* Force CLOCK ON for VCE_UENC_REG_CLOCK_GATING */
tmp = data = RREG32(mmVCE_UENC_REG_CLOCK_GATING); tmp = data = RREG32(mmVCE_UENC_REG_CLOCK_GATING);
data |= 0x37f; data |= 0x37f;
if (tmp != data) if (tmp != data)
WREG32(mmVCE_UENC_REG_CLOCK_GATING, data); WREG32(mmVCE_UENC_REG_CLOCK_GATING, data);
/* Force VCE_UENC_DMA_DCLK_CTRL Clock ON */
tmp = data = RREG32(mmVCE_UENC_DMA_DCLK_CTRL); tmp = data = RREG32(mmVCE_UENC_DMA_DCLK_CTRL);
data |= VCE_UENC_DMA_DCLK_CTRL__WRDMCLK_FORCEON_MASK | data |= VCE_UENC_DMA_DCLK_CTRL__WRDMCLK_FORCEON_MASK |
VCE_UENC_DMA_DCLK_CTRL__RDDMCLK_FORCEON_MASK | VCE_UENC_DMA_DCLK_CTRL__RDDMCLK_FORCEON_MASK |
...@@ -172,34 +167,27 @@ static void vce_v3_0_set_vce_sw_clock_gating(struct amdgpu_device *adev, ...@@ -172,34 +167,27 @@ static void vce_v3_0_set_vce_sw_clock_gating(struct amdgpu_device *adev,
if (tmp != data) if (tmp != data)
WREG32(mmVCE_UENC_DMA_DCLK_CTRL, data); WREG32(mmVCE_UENC_DMA_DCLK_CTRL, data);
} else { } else {
/* Force CLOCK OFF for VCE_CLOCK_GATING_B,
* {*, *_FORCE_OFF} = {*, 1}
* set VREG to Dynamic, as it can't be OFF
*/
tmp = data = RREG32(mmVCE_CLOCK_GATING_B); tmp = data = RREG32(mmVCE_CLOCK_GATING_B);
data &= ~0x80010; data &= ~0x80010;
data |= 0xe70008; data |= 0xe70008;
if (tmp != data) if (tmp != data)
WREG32(mmVCE_CLOCK_GATING_B, data); WREG32(mmVCE_CLOCK_GATING_B, data);
/* Force CLOCK OFF for VCE_UENC_CLOCK_GATING,
* Force ClOCK OFF takes precedent over Force CLOCK ON setting.
* {*_FORCE_ON, *_FORCE_OFF} = {*, 1}
*/
tmp = data = RREG32(mmVCE_UENC_CLOCK_GATING); tmp = data = RREG32(mmVCE_UENC_CLOCK_GATING);
data |= 0xffc00000; data |= 0xffc00000;
if (tmp != data) if (tmp != data)
WREG32(mmVCE_UENC_CLOCK_GATING, data); WREG32(mmVCE_UENC_CLOCK_GATING, data);
/* Set VCE_UENC_CLOCK_GATING_2 */
tmp = data = RREG32(mmVCE_UENC_CLOCK_GATING_2); tmp = data = RREG32(mmVCE_UENC_CLOCK_GATING_2);
data |= 0x10000; data |= 0x10000;
if (tmp != data) if (tmp != data)
WREG32(mmVCE_UENC_CLOCK_GATING_2, data); WREG32(mmVCE_UENC_CLOCK_GATING_2, data);
/* Set VCE_UENC_REG_CLOCK_GATING to dynamic */
tmp = data = RREG32(mmVCE_UENC_REG_CLOCK_GATING); tmp = data = RREG32(mmVCE_UENC_REG_CLOCK_GATING);
data &= ~0xffc00000; data &= ~0xffc00000;
if (tmp != data) if (tmp != data)
WREG32(mmVCE_UENC_REG_CLOCK_GATING, data); WREG32(mmVCE_UENC_REG_CLOCK_GATING, data);
/* Set VCE_UENC_DMA_DCLK_CTRL CG always in dynamic mode */
tmp = data = RREG32(mmVCE_UENC_DMA_DCLK_CTRL); tmp = data = RREG32(mmVCE_UENC_DMA_DCLK_CTRL);
data &= ~(VCE_UENC_DMA_DCLK_CTRL__WRDMCLK_FORCEON_MASK | data &= ~(VCE_UENC_DMA_DCLK_CTRL__WRDMCLK_FORCEON_MASK |
VCE_UENC_DMA_DCLK_CTRL__RDDMCLK_FORCEON_MASK | VCE_UENC_DMA_DCLK_CTRL__RDDMCLK_FORCEON_MASK |
...@@ -538,7 +526,7 @@ static void vce_v3_0_mc_resume(struct amdgpu_device *adev, int idx) ...@@ -538,7 +526,7 @@ static void vce_v3_0_mc_resume(struct amdgpu_device *adev, int idx)
WREG32_P(mmVCE_CLOCK_GATING_A, 0, ~(1 << 16)); WREG32_P(mmVCE_CLOCK_GATING_A, 0, ~(1 << 16));
WREG32_P(mmVCE_UENC_CLOCK_GATING, 0x1FF000, ~0xFF9FF000); WREG32_P(mmVCE_UENC_CLOCK_GATING, 0x1FF000, ~0xFF9FF000);
WREG32_P(mmVCE_UENC_REG_CLOCK_GATING, 0x3F, ~0x3F); WREG32_P(mmVCE_UENC_REG_CLOCK_GATING, 0x3F, ~0x3F);
WREG32(mmVCE_CLOCK_GATING_B, 0xf7); WREG32(mmVCE_CLOCK_GATING_B, 0x1FF);
WREG32(mmVCE_LMI_CTRL, 0x00398000); WREG32(mmVCE_LMI_CTRL, 0x00398000);
WREG32_P(mmVCE_LMI_CACHE_CTRL, 0x0, ~0x1); WREG32_P(mmVCE_LMI_CACHE_CTRL, 0x0, ~0x1);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment