Commit f3f0ea95 authored by Tom St Denis, committed by Alex Deucher

drm/amd/amdgpu: Cleanup register access in VCE v3
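
Replace open-coded WREG32_P() read-modify-write sequences with the
WREG32_FIELD() helper wherever a single named register field is updated,
and drop the GRBM_GFX_INDEX instance-select if/else blocks in favour of
writing the VCE_INSTANCE field directly. The clock-gating helper also
drops the "only write if the value changed" checks and writes the
registers unconditionally. As a rough sketch (paraphrasing the helper
macros in amdgpu.h; the exact wording there may differ), WREG32_FIELD()
expands to a masked read-modify-write of the named field:

	#define REG_FIELD_SHIFT(reg, field) reg##__##field##__SHIFT
	#define REG_FIELD_MASK(reg, field) reg##__##field##_MASK

	#define WREG32_FIELD(reg, field, val) \
		WREG32(mm##reg, (RREG32(mm##reg) & ~REG_FIELD_MASK(reg, field)) | \
				((val) << REG_FIELD_SHIFT(reg, field)))

So, for example, WREG32_FIELD(VCE_STATUS, JOB_BUSY, 0) reads mmVCE_STATUS,
clears VCE_STATUS__JOB_BUSY_MASK, shifts the new value into place and
writes the register back, which is what the replaced
WREG32_P(mmVCE_STATUS, 0, ~VCE_STATUS__JOB_BUSY_MASK) call did by hand.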

Signed-off-by: Tom St Denis <tom.stdenis@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent 75cd45a4
@@ -110,22 +110,13 @@ static void vce_v3_0_ring_set_wptr(struct amdgpu_ring *ring)
 
 static void vce_v3_0_override_vce_clock_gating(struct amdgpu_device *adev, bool override)
 {
-	u32 tmp, data;
-
-	tmp = data = RREG32(mmVCE_RB_ARB_CTRL);
-	if (override)
-		data |= VCE_RB_ARB_CTRL__VCE_CGTT_OVERRIDE_MASK;
-	else
-		data &= ~VCE_RB_ARB_CTRL__VCE_CGTT_OVERRIDE_MASK;
-
-	if (tmp != data)
-		WREG32(mmVCE_RB_ARB_CTRL, data);
+	WREG32_FIELD(VCE_RB_ARB_CTRL, VCE_CGTT_OVERRIDE, override ? 1 : 0);
 }
 
 static void vce_v3_0_set_vce_sw_clock_gating(struct amdgpu_device *adev,
 					     bool gated)
 {
-	u32 tmp, data;
+	u32 data;
 
 	/* Set Override to disable Clock Gating */
 	vce_v3_0_override_vce_clock_gating(adev, true);
@@ -136,64 +127,54 @@ static void vce_v3_0_set_vce_sw_clock_gating(struct amdgpu_device *adev,
 	   fly as necessary.
 	 */
 	if (gated) {
-		tmp = data = RREG32(mmVCE_CLOCK_GATING_B);
+		data = RREG32(mmVCE_CLOCK_GATING_B);
 		data |= 0x1ff;
 		data &= ~0xef0000;
-		if (tmp != data)
-			WREG32(mmVCE_CLOCK_GATING_B, data);
+		WREG32(mmVCE_CLOCK_GATING_B, data);
 
-		tmp = data = RREG32(mmVCE_UENC_CLOCK_GATING);
+		data = RREG32(mmVCE_UENC_CLOCK_GATING);
 		data |= 0x3ff000;
 		data &= ~0xffc00000;
-		if (tmp != data)
-			WREG32(mmVCE_UENC_CLOCK_GATING, data);
+		WREG32(mmVCE_UENC_CLOCK_GATING, data);
 
-		tmp = data = RREG32(mmVCE_UENC_CLOCK_GATING_2);
+		data = RREG32(mmVCE_UENC_CLOCK_GATING_2);
 		data |= 0x2;
 		data &= ~0x00010000;
-		if (tmp != data)
-			WREG32(mmVCE_UENC_CLOCK_GATING_2, data);
+		WREG32(mmVCE_UENC_CLOCK_GATING_2, data);
 
-		tmp = data = RREG32(mmVCE_UENC_REG_CLOCK_GATING);
+		data = RREG32(mmVCE_UENC_REG_CLOCK_GATING);
 		data |= 0x37f;
-		if (tmp != data)
-			WREG32(mmVCE_UENC_REG_CLOCK_GATING, data);
+		WREG32(mmVCE_UENC_REG_CLOCK_GATING, data);
 
-		tmp = data = RREG32(mmVCE_UENC_DMA_DCLK_CTRL);
+		data = RREG32(mmVCE_UENC_DMA_DCLK_CTRL);
 		data |= VCE_UENC_DMA_DCLK_CTRL__WRDMCLK_FORCEON_MASK |
 			VCE_UENC_DMA_DCLK_CTRL__RDDMCLK_FORCEON_MASK |
 			VCE_UENC_DMA_DCLK_CTRL__REGCLK_FORCEON_MASK |
 			0x8;
-		if (tmp != data)
-			WREG32(mmVCE_UENC_DMA_DCLK_CTRL, data);
+		WREG32(mmVCE_UENC_DMA_DCLK_CTRL, data);
 	} else {
-		tmp = data = RREG32(mmVCE_CLOCK_GATING_B);
+		data = RREG32(mmVCE_CLOCK_GATING_B);
 		data &= ~0x80010;
 		data |= 0xe70008;
-		if (tmp != data)
-			WREG32(mmVCE_CLOCK_GATING_B, data);
+		WREG32(mmVCE_CLOCK_GATING_B, data);
 
-		tmp = data = RREG32(mmVCE_UENC_CLOCK_GATING);
+		data = RREG32(mmVCE_UENC_CLOCK_GATING);
 		data |= 0xffc00000;
-		if (tmp != data)
-			WREG32(mmVCE_UENC_CLOCK_GATING, data);
+		WREG32(mmVCE_UENC_CLOCK_GATING, data);
 
-		tmp = data = RREG32(mmVCE_UENC_CLOCK_GATING_2);
+		data = RREG32(mmVCE_UENC_CLOCK_GATING_2);
 		data |= 0x10000;
-		if (tmp != data)
-			WREG32(mmVCE_UENC_CLOCK_GATING_2, data);
+		WREG32(mmVCE_UENC_CLOCK_GATING_2, data);
 
-		tmp = data = RREG32(mmVCE_UENC_REG_CLOCK_GATING);
+		data = RREG32(mmVCE_UENC_REG_CLOCK_GATING);
 		data &= ~0xffc00000;
-		if (tmp != data)
-			WREG32(mmVCE_UENC_REG_CLOCK_GATING, data);
+		WREG32(mmVCE_UENC_REG_CLOCK_GATING, data);
 
-		tmp = data = RREG32(mmVCE_UENC_DMA_DCLK_CTRL);
+		data = RREG32(mmVCE_UENC_DMA_DCLK_CTRL);
 		data &= ~(VCE_UENC_DMA_DCLK_CTRL__WRDMCLK_FORCEON_MASK |
 			  VCE_UENC_DMA_DCLK_CTRL__RDDMCLK_FORCEON_MASK |
 			  VCE_UENC_DMA_DCLK_CTRL__REGCLK_FORCEON_MASK |
 			  0x8);
-		if (tmp != data)
-			WREG32(mmVCE_UENC_DMA_DCLK_CTRL, data);
+		WREG32(mmVCE_UENC_DMA_DCLK_CTRL, data);
 	}
 
 	vce_v3_0_override_vce_clock_gating(adev, false);
@@ -213,12 +194,9 @@ static int vce_v3_0_firmware_loaded(struct amdgpu_device *adev)
 		}
 
 		DRM_ERROR("VCE not responding, trying to reset the ECPU!!!\n");
-		WREG32_P(mmVCE_SOFT_RESET,
-			VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK,
-			~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
+		WREG32_FIELD(VCE_SOFT_RESET, ECPU_SOFT_RESET, 1);
 		mdelay(10);
-		WREG32_P(mmVCE_SOFT_RESET, 0,
-			~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
+		WREG32_FIELD(VCE_SOFT_RESET, ECPU_SOFT_RESET, 0);
 		mdelay(10);
 	}
 
@@ -256,34 +234,22 @@ static int vce_v3_0_start(struct amdgpu_device *adev)
 		if (adev->vce.harvest_config & (1 << idx))
 			continue;
 
-		if (idx == 0)
-			WREG32_P(mmGRBM_GFX_INDEX, 0,
-				~GRBM_GFX_INDEX__VCE_INSTANCE_MASK);
-		else
-			WREG32_P(mmGRBM_GFX_INDEX,
-				GRBM_GFX_INDEX__VCE_INSTANCE_MASK,
-				~GRBM_GFX_INDEX__VCE_INSTANCE_MASK);
-
+		WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, idx);
 		vce_v3_0_mc_resume(adev, idx);
-
-		WREG32_P(mmVCE_STATUS, VCE_STATUS__JOB_BUSY_MASK,
-			~VCE_STATUS__JOB_BUSY_MASK);
+		WREG32_FIELD(VCE_STATUS, JOB_BUSY, 1);
 
 		if (adev->asic_type >= CHIP_STONEY)
 			WREG32_P(mmVCE_VCPU_CNTL, 1, ~0x200001);
 		else
-			WREG32_P(mmVCE_VCPU_CNTL, VCE_VCPU_CNTL__CLK_EN_MASK,
-				~VCE_VCPU_CNTL__CLK_EN_MASK);
-
-		WREG32_P(mmVCE_SOFT_RESET, 0,
-			~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
-
+			WREG32_FIELD(VCE_VCPU_CNTL, CLK_EN, 1);
+
+		WREG32_FIELD(VCE_SOFT_RESET, ECPU_SOFT_RESET, 0);
 		mdelay(100);
 
 		r = vce_v3_0_firmware_loaded(adev);
 
 		/* clear BUSY flag */
-		WREG32_P(mmVCE_STATUS, 0, ~VCE_STATUS__JOB_BUSY_MASK);
+		WREG32_FIELD(VCE_STATUS, JOB_BUSY, 0);
 
 		if (r) {
 			DRM_ERROR("VCE not responding, giving up!!!\n");
@@ -292,7 +258,7 @@ static int vce_v3_0_start(struct amdgpu_device *adev)
 		}
 	}
 
-	WREG32_P(mmGRBM_GFX_INDEX, 0, ~GRBM_GFX_INDEX__VCE_INSTANCE_MASK);
+	WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, 0);
 	mutex_unlock(&adev->grbm_idx_mutex);
 
 	return 0;
@@ -307,33 +273,25 @@ static int vce_v3_0_stop(struct amdgpu_device *adev)
 		if (adev->vce.harvest_config & (1 << idx))
 			continue;
 
-		if (idx == 0)
-			WREG32_P(mmGRBM_GFX_INDEX, 0,
-				~GRBM_GFX_INDEX__VCE_INSTANCE_MASK);
-		else
-			WREG32_P(mmGRBM_GFX_INDEX,
-				GRBM_GFX_INDEX__VCE_INSTANCE_MASK,
-				~GRBM_GFX_INDEX__VCE_INSTANCE_MASK);
+		WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, idx);
 
 		if (adev->asic_type >= CHIP_STONEY)
 			WREG32_P(mmVCE_VCPU_CNTL, 0, ~0x200001);
 		else
-			WREG32_P(mmVCE_VCPU_CNTL, 0,
-				~VCE_VCPU_CNTL__CLK_EN_MASK);
+			WREG32_FIELD(VCE_VCPU_CNTL, CLK_EN, 0);
+
 		/* hold on ECPU */
-		WREG32_P(mmVCE_SOFT_RESET,
-			 VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK,
-			 ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
+		WREG32_FIELD(VCE_SOFT_RESET, ECPU_SOFT_RESET, 1);
 
 		/* clear BUSY flag */
-		WREG32_P(mmVCE_STATUS, 0, ~VCE_STATUS__JOB_BUSY_MASK);
+		WREG32_FIELD(VCE_STATUS, JOB_BUSY, 0);
 
 		/* Set Clock-Gating off */
 		if (adev->cg_flags & AMD_CG_SUPPORT_VCE_MGCG)
 			vce_v3_0_set_vce_sw_clock_gating(adev, false);
 	}
 
-	WREG32_P(mmGRBM_GFX_INDEX, 0, ~GRBM_GFX_INDEX__VCE_INSTANCE_MASK);
+	WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, 0);
 	mutex_unlock(&adev->grbm_idx_mutex);
 
 	return 0;
@@ -561,9 +519,7 @@ static void vce_v3_0_mc_resume(struct amdgpu_device *adev, int idx)
 	}
 
 	WREG32_P(mmVCE_LMI_CTRL2, 0x0, ~0x100);
-
-	WREG32_P(mmVCE_SYS_INT_EN, VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK,
-		~VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK);
+	WREG32_FIELD(VCE_SYS_INT_EN, VCE_SYS_INT_TRAP_INTERRUPT_EN, 1);
 }
 
 static bool vce_v3_0_is_idle(void *handle)
@@ -599,7 +555,6 @@ static int vce_v3_0_check_soft_reset(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 	u32 srbm_soft_reset = 0;
-	u32 tmp;
 
 	/* According to VCE team , we should use VCE_STATUS instead
 	 * SRBM_STATUS.VCE_BUSY bit for busy status checking.
@@ -614,23 +569,17 @@ static int vce_v3_0_check_soft_reset(void *handle)
 	 *
 	 * VCE team suggest use bit 3--bit 6 for busy status check
 	 */
-	tmp = RREG32(mmGRBM_GFX_INDEX);
-	tmp = REG_SET_FIELD(tmp, GRBM_GFX_INDEX, INSTANCE_INDEX, 0);
-	WREG32(mmGRBM_GFX_INDEX, tmp);
+	WREG32_FIELD(GRBM_GFX_INDEX, INSTANCE_INDEX, 0);
 	if (RREG32(mmVCE_STATUS) & AMDGPU_VCE_STATUS_BUSY_MASK) {
 		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE0, 1);
 		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE1, 1);
 	}
-	tmp = RREG32(mmGRBM_GFX_INDEX);
-	tmp = REG_SET_FIELD(tmp, GRBM_GFX_INDEX, INSTANCE_INDEX, 0x10);
-	WREG32(mmGRBM_GFX_INDEX, tmp);
+	WREG32_FIELD(GRBM_GFX_INDEX, INSTANCE_INDEX, 0x10);
 	if (RREG32(mmVCE_STATUS) & AMDGPU_VCE_STATUS_BUSY_MASK) {
 		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE0, 1);
 		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE1, 1);
 	}
-	tmp = RREG32(mmGRBM_GFX_INDEX);
-	tmp = REG_SET_FIELD(tmp, GRBM_GFX_INDEX, INSTANCE_INDEX, 0);
-	WREG32(mmGRBM_GFX_INDEX, tmp);
+	WREG32_FIELD(GRBM_GFX_INDEX, INSTANCE_INDEX, 0);
 
 	if (srbm_soft_reset) {
 		adev->ip_block_status[AMD_IP_BLOCK_TYPE_VCE].hang = true;
@@ -718,9 +667,7 @@ static int vce_v3_0_process_interrupt(struct amdgpu_device *adev,
 {
 	DRM_DEBUG("IH: VCE\n");
 
-	WREG32_P(mmVCE_SYS_INT_STATUS,
-		VCE_SYS_INT_STATUS__VCE_SYS_INT_TRAP_INTERRUPT_INT_MASK,
-		~VCE_SYS_INT_STATUS__VCE_SYS_INT_TRAP_INTERRUPT_INT_MASK);
+	WREG32_FIELD(VCE_SYS_INT_STATUS, VCE_SYS_INT_TRAP_INTERRUPT_INT, 1);
 
 	switch (entry->src_data) {
 	case 0:
@@ -767,13 +714,7 @@ static int vce_v3_0_set_clockgating_state(void *handle,
 		if (adev->vce.harvest_config & (1 << i))
 			continue;
 
-		if (i == 0)
-			WREG32_P(mmGRBM_GFX_INDEX, 0,
-					~GRBM_GFX_INDEX__VCE_INSTANCE_MASK);
-		else
-			WREG32_P(mmGRBM_GFX_INDEX,
-					GRBM_GFX_INDEX__VCE_INSTANCE_MASK,
-					~GRBM_GFX_INDEX__VCE_INSTANCE_MASK);
+		WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, i);
 
 		if (enable) {
 			/* initialize VCE_CLOCK_GATING_A: Clock ON/OFF delay */
@@ -792,7 +733,7 @@ static int vce_v3_0_set_clockgating_state(void *handle,
 		vce_v3_0_set_vce_sw_clock_gating(adev, enable);
 	}
 
-	WREG32_P(mmGRBM_GFX_INDEX, 0, ~GRBM_GFX_INDEX__VCE_INSTANCE_MASK);
+	WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, 0);
 	mutex_unlock(&adev->grbm_idx_mutex);
 
 	return 0;