Commit 450da2ef authored by James Zhu, committed by Alex Deucher

drm/amdgpu: fix unused variable

SOC15_WAIT_ON_RREG's return value need not always be handled by the caller. The new design makes the macro evaluate to its result instead of writing it through a caller-provided variable, which fixes this kind of unused-variable warning.
Signed-off-by: James Zhu <James.Zhu@amd.com>
Reported-by: kernel test robot <lkp@intel.com>
Acked-by: Leo Liu <leo.liu@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent a20ace1b
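As the message above notes, the old macro wrote its result into a caller-supplied variable that many call sites then never read. A minimal sketch of the before/after call shape, taken from the wait-for-idle hunks below (illustrative only, not a complete function):

	/* Before: result returned through an output argument; 'ret' is easy to leave unused. */
	int ret = 0;
	SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_STATUS, UVD_STATUS__IDLE, UVD_STATUS__IDLE, ret);

	/* After: the macro itself evaluates to the result, so callers either assign it... */
	int ret;
	ret = SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_STATUS, UVD_STATUS__IDLE, UVD_STATUS__IDLE);

	/* ...or simply drop it when the outcome does not matter. */
	SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_STATUS, UVD_STATUS__IDLE, UVD_STATUS__IDLE);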
@@ -230,9 +230,9 @@ static int jpeg_v2_0_disable_power_gating(struct amdgpu_device *adev)
 	data = 1 << UVD_PGFSM_CONFIG__UVDJ_PWR_CONFIG__SHIFT;
 	WREG32(SOC15_REG_OFFSET(JPEG, 0, mmUVD_PGFSM_CONFIG), data);
-	SOC15_WAIT_ON_RREG(JPEG, 0,
+	r = SOC15_WAIT_ON_RREG(JPEG, 0,
 		mmUVD_PGFSM_STATUS, UVD_PGFSM_STATUS_UVDJ_PWR_ON,
-		UVD_PGFSM_STATUS__UVDJ_PWR_STATUS_MASK, r);
+		UVD_PGFSM_STATUS__UVDJ_PWR_STATUS_MASK);
 	if (r) {
 		DRM_ERROR("amdgpu: JPEG disable power gating failed\n");
@@ -261,9 +261,9 @@ static int jpeg_v2_0_enable_power_gating(struct amdgpu_device* adev)
 	data = 2 << UVD_PGFSM_CONFIG__UVDJ_PWR_CONFIG__SHIFT;
 	WREG32(SOC15_REG_OFFSET(JPEG, 0, mmUVD_PGFSM_CONFIG), data);
-	SOC15_WAIT_ON_RREG(JPEG, 0, mmUVD_PGFSM_STATUS,
+	r = SOC15_WAIT_ON_RREG(JPEG, 0, mmUVD_PGFSM_STATUS,
 		(2 << UVD_PGFSM_STATUS__UVDJ_PWR_STATUS__SHIFT),
-		UVD_PGFSM_STATUS__UVDJ_PWR_STATUS_MASK, r);
+		UVD_PGFSM_STATUS__UVDJ_PWR_STATUS_MASK);
 	if (r) {
 		DRM_ERROR("amdgpu: JPEG enable power gating failed\n");
@@ -677,10 +677,10 @@ static bool jpeg_v2_0_is_idle(void *handle)
 static int jpeg_v2_0_wait_for_idle(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-	int ret = 0;
+	int ret;
-	SOC15_WAIT_ON_RREG(JPEG, 0, mmUVD_JRBC_STATUS, UVD_JRBC_STATUS__RB_JOB_DONE_MASK,
-		UVD_JRBC_STATUS__RB_JOB_DONE_MASK, ret);
+	ret = SOC15_WAIT_ON_RREG(JPEG, 0, mmUVD_JRBC_STATUS, UVD_JRBC_STATUS__RB_JOB_DONE_MASK,
+		UVD_JRBC_STATUS__RB_JOB_DONE_MASK);
 	return ret;
 }
...
@@ -449,15 +449,15 @@ static bool jpeg_v2_5_is_idle(void *handle)
 static int jpeg_v2_5_wait_for_idle(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-	int i, ret = 0;
+	int i, ret;
 	for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
 		if (adev->jpeg.harvest_config & (1 << i))
 			continue;
-		SOC15_WAIT_ON_RREG(JPEG, i, mmUVD_JRBC_STATUS,
+		ret = SOC15_WAIT_ON_RREG(JPEG, i, mmUVD_JRBC_STATUS,
 			UVD_JRBC_STATUS__RB_JOB_DONE_MASK,
-			UVD_JRBC_STATUS__RB_JOB_DONE_MASK, ret);
+			UVD_JRBC_STATUS__RB_JOB_DONE_MASK);
 		if (ret)
 			return ret;
 	}
...
@@ -266,9 +266,9 @@ static int jpeg_v3_0_disable_static_power_gating(struct amdgpu_device *adev)
 	data = 1 << UVD_PGFSM_CONFIG__UVDJ_PWR_CONFIG__SHIFT;
 	WREG32(SOC15_REG_OFFSET(JPEG, 0, mmUVD_PGFSM_CONFIG), data);
-	SOC15_WAIT_ON_RREG(JPEG, 0,
+	r = SOC15_WAIT_ON_RREG(JPEG, 0,
 		mmUVD_PGFSM_STATUS, UVD_PGFSM_STATUS_UVDJ_PWR_ON,
-		UVD_PGFSM_STATUS__UVDJ_PWR_STATUS_MASK, r);
+		UVD_PGFSM_STATUS__UVDJ_PWR_STATUS_MASK);
 	if (r) {
 		DRM_ERROR("amdgpu: JPEG disable power gating failed\n");
@@ -301,9 +301,9 @@ static int jpeg_v3_0_enable_static_power_gating(struct amdgpu_device* adev)
 	data = 2 << UVD_PGFSM_CONFIG__UVDJ_PWR_CONFIG__SHIFT;
 	WREG32(SOC15_REG_OFFSET(JPEG, 0, mmUVD_PGFSM_CONFIG), data);
-	SOC15_WAIT_ON_RREG(JPEG, 0, mmUVD_PGFSM_STATUS,
+	r = SOC15_WAIT_ON_RREG(JPEG, 0, mmUVD_PGFSM_STATUS,
 		(2 << UVD_PGFSM_STATUS__UVDJ_PWR_STATUS__SHIFT),
-		UVD_PGFSM_STATUS__UVDJ_PWR_STATUS_MASK, r);
+		UVD_PGFSM_STATUS__UVDJ_PWR_STATUS_MASK);
 	if (r) {
 		DRM_ERROR("amdgpu: JPEG enable power gating failed\n");
@@ -461,11 +461,11 @@ static bool jpeg_v3_0_is_idle(void *handle)
 static int jpeg_v3_0_wait_for_idle(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-	int ret = 0;
+	int ret;
-	SOC15_WAIT_ON_RREG(JPEG, 0, mmUVD_JRBC_STATUS,
+	ret = SOC15_WAIT_ON_RREG(JPEG, 0, mmUVD_JRBC_STATUS,
 		UVD_JRBC_STATUS__RB_JOB_DONE_MASK,
-		UVD_JRBC_STATUS__RB_JOB_DONE_MASK, ret);
+		UVD_JRBC_STATUS__RB_JOB_DONE_MASK);
 	if (ret)
 		return ret;
...
@@ -50,7 +50,8 @@
 #define WREG32_SOC15_OFFSET(ip, inst, reg, offset, value) \
 	WREG32((adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg) + offset, value)
-#define SOC15_WAIT_ON_RREG(ip, inst, reg, expected_value, mask, ret) \
+#define SOC15_WAIT_ON_RREG(ip, inst, reg, expected_value, mask) \
+({ int ret = 0; \
 	do { \
 		uint32_t old_ = 0; \
 		uint32_t tmp_ = RREG32(adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg); \
@@ -71,7 +72,9 @@
 				break; \
 			} \
 		} \
-	} while (0)
+	} while (0); \
+	ret; \
+})
 #define WREG32_RLC(reg, value) \
 	do { \
...
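The new definition relies on a GNU C statement expression, ({ ... }), whose value is the last expression inside it; that is how the macro can both run its polling loop and hand back ret without an output parameter. A small self-contained sketch of the same pattern, with made-up names and a trivial retry loop in place of the kernel's register polling (assumes GCC or Clang, since statement expressions are an extension):

	#include <stdio.h>

	/* Hypothetical helper: poll read_fn() up to 'tries' times and evaluate
	 * to 0 once (value & mask) == expected, or to -1 on timeout. */
	#define WAIT_FOR_VALUE(read_fn, expected, mask, tries)		\
	({								\
		int ret_ = -1;						\
		for (int i_ = 0; i_ < (tries); i_++) {			\
			if (((read_fn()) & (mask)) == (expected)) {	\
				ret_ = 0;				\
				break;					\
			}						\
		}							\
		ret_;	/* value of the whole ({ ... }) block */	\
	})

	static unsigned int fake_status(void) { return 0x3; }

	int main(void)
	{
		/* A caller that cares about the outcome assigns it... */
		int r = WAIT_FOR_VALUE(fake_status, 0x1, 0x1, 10);
		printf("r = %d\n", r);

		/* ...and a caller that does not simply ignores the expression,
		 * with no dangling 'ret' variable left behind. */
		WAIT_FOR_VALUE(fake_status, 0x2, 0x2, 10);
		return 0;
	}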
@@ -683,7 +683,6 @@ static void vcn_v1_0_clock_gating_dpg_mode(struct amdgpu_device *adev, uint8_t s
 static void vcn_1_0_disable_static_power_gating(struct amdgpu_device *adev)
 {
 	uint32_t data = 0;
-	int ret;
 	if (adev->pg_flags & AMD_PG_SUPPORT_VCN) {
 		data = (1 << UVD_PGFSM_CONFIG__UVDM_PWR_CONFIG__SHIFT
@@ -699,7 +698,7 @@ static void vcn_1_0_disable_static_power_gating(struct amdgpu_device *adev)
 			| 2 << UVD_PGFSM_CONFIG__UVDW_PWR_CONFIG__SHIFT);
 		WREG32_SOC15(VCN, 0, mmUVD_PGFSM_CONFIG, data);
-		SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_PGFSM_STATUS, UVD_PGFSM_STATUS__UVDM_UVDU_PWR_ON, 0xFFFFFF, ret);
+		SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_PGFSM_STATUS, UVD_PGFSM_STATUS__UVDM_UVDU_PWR_ON, 0xFFFFFF);
 	} else {
 		data = (1 << UVD_PGFSM_CONFIG__UVDM_PWR_CONFIG__SHIFT
 			| 1 << UVD_PGFSM_CONFIG__UVDU_PWR_CONFIG__SHIFT
@@ -713,7 +712,7 @@ static void vcn_1_0_disable_static_power_gating(struct amdgpu_device *adev)
 			| 1 << UVD_PGFSM_CONFIG__UVDE_PWR_CONFIG__SHIFT
 			| 1 << UVD_PGFSM_CONFIG__UVDW_PWR_CONFIG__SHIFT);
 		WREG32_SOC15(VCN, 0, mmUVD_PGFSM_CONFIG, data);
-		SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_PGFSM_STATUS, 0, 0xFFFFFFFF, ret);
+		SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_PGFSM_STATUS, 0, 0xFFFFFFFF);
 	}
 	/* polling UVD_PGFSM_STATUS to confirm UVDM_PWR_STATUS , UVDU_PWR_STATUS are 0 (power on) */
@@ -729,7 +728,6 @@ static void vcn_1_0_disable_static_power_gating(struct amdgpu_device *adev)
 static void vcn_1_0_enable_static_power_gating(struct amdgpu_device *adev)
 {
 	uint32_t data = 0;
-	int ret;
 	if (adev->pg_flags & AMD_PG_SUPPORT_VCN) {
 		/* Before power off, this indicator has to be turned on */
@@ -764,7 +762,7 @@ static void vcn_1_0_enable_static_power_gating(struct amdgpu_device *adev)
 			| 2 << UVD_PGFSM_STATUS__UVDTE_PWR_STATUS__SHIFT
 			| 2 << UVD_PGFSM_STATUS__UVDE_PWR_STATUS__SHIFT
 			| 2 << UVD_PGFSM_STATUS__UVDW_PWR_STATUS__SHIFT);
-		SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_PGFSM_STATUS, data, 0xFFFFFFFF, ret);
+		SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_PGFSM_STATUS, data, 0xFFFFFFFF);
 	}
 }
@@ -1113,15 +1111,15 @@ static int vcn_v1_0_start(struct amdgpu_device *adev)
  */
 static int vcn_v1_0_stop_spg_mode(struct amdgpu_device *adev)
 {
-	int ret_code, tmp;
+	int tmp;
-	SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_STATUS, UVD_STATUS__IDLE, 0x7, ret_code);
+	SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_STATUS, UVD_STATUS__IDLE, 0x7);
 	tmp = UVD_LMI_STATUS__VCPU_LMI_WRITE_CLEAN_MASK |
 		UVD_LMI_STATUS__READ_CLEAN_MASK |
 		UVD_LMI_STATUS__WRITE_CLEAN_MASK |
 		UVD_LMI_STATUS__WRITE_CLEAN_RAW_MASK;
-	SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_LMI_STATUS, tmp, tmp, ret_code);
+	SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_LMI_STATUS, tmp, tmp);
 	/* put VCPU into reset */
 	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET),
@@ -1130,7 +1128,7 @@ static int vcn_v1_0_stop_spg_mode(struct amdgpu_device *adev)
 	tmp = UVD_LMI_STATUS__UMC_READ_CLEAN_RAW_MASK |
 		UVD_LMI_STATUS__UMC_WRITE_CLEAN_RAW_MASK;
-	SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_LMI_STATUS, tmp, tmp, ret_code);
+	SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_LMI_STATUS, tmp, tmp);
 	/* disable VCPU clock */
 	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CNTL), 0,
@@ -1154,30 +1152,29 @@ static int vcn_v1_0_stop_spg_mode(struct amdgpu_device *adev)
 static int vcn_v1_0_stop_dpg_mode(struct amdgpu_device *adev)
 {
-	int ret_code = 0;
 	uint32_t tmp;
 	/* Wait for power status to be UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF */
 	SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS,
 		UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF,
-		UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code);
+		UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
 	/* wait for read ptr to be equal to write ptr */
 	tmp = RREG32_SOC15(UVD, 0, mmUVD_RB_WPTR);
-	SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_RB_RPTR, tmp, 0xFFFFFFFF, ret_code);
+	SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_RB_RPTR, tmp, 0xFFFFFFFF);
 	tmp = RREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2);
-	SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_RB_RPTR2, tmp, 0xFFFFFFFF, ret_code);
+	SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_RB_RPTR2, tmp, 0xFFFFFFFF);
 	tmp = RREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_WPTR);
-	SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_JRBC_RB_RPTR, tmp, 0xFFFFFFFF, ret_code);
+	SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_JRBC_RB_RPTR, tmp, 0xFFFFFFFF);
 	tmp = RREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR) & 0x7FFFFFFF;
-	SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_RBC_RB_RPTR, tmp, 0xFFFFFFFF, ret_code);
+	SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_RBC_RB_RPTR, tmp, 0xFFFFFFFF);
 	SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS,
 		UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF,
-		UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code);
+		UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
 	/* disable dynamic power gating mode */
 	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_POWER_STATUS), 0,
@@ -1220,9 +1217,9 @@ static int vcn_v1_0_pause_dpg_mode(struct amdgpu_device *adev,
 			ret_code = 0;
 			if (!(reg_data & UVD_DPG_PAUSE__JPEG_PAUSE_DPG_ACK_MASK))
-				SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS,
+				ret_code = SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS,
 					UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF,
-					UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code);
+					UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
 			if (!ret_code) {
 				/* pause DPG non-jpeg */
@@ -1230,7 +1227,7 @@ static int vcn_v1_0_pause_dpg_mode(struct amdgpu_device *adev,
 				WREG32_SOC15(UVD, 0, mmUVD_DPG_PAUSE, reg_data);
 				SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_DPG_PAUSE,
 					UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK,
-					UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK, ret_code);
+					UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK);
 				/* Restore */
 				ring = &adev->vcn.inst->ring_enc[0];
@@ -1252,7 +1249,7 @@ static int vcn_v1_0_pause_dpg_mode(struct amdgpu_device *adev,
 					RREG32_SOC15(UVD, 0, mmUVD_SCRATCH2) & 0x7FFFFFFF);
 				SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS,
 					UVD_PGFSM_CONFIG__UVDM_UVDU_PWR_ON,
-					UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code);
+					UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
 			}
 		} else {
 			/* unpause dpg non-jpeg, no need to wait */
@@ -1276,9 +1273,9 @@ static int vcn_v1_0_pause_dpg_mode(struct amdgpu_device *adev,
 			ret_code = 0;
 			if (!(reg_data & UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK))
-				SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS,
+				ret_code = SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS,
 					UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF,
-					UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code);
+					UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
 			if (!ret_code) {
 				/* Make sure JPRG Snoop is disabled before sending the pause */
@@ -1291,7 +1288,7 @@ static int vcn_v1_0_pause_dpg_mode(struct amdgpu_device *adev,
 				WREG32_SOC15(UVD, 0, mmUVD_DPG_PAUSE, reg_data);
 				SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_DPG_PAUSE,
 					UVD_DPG_PAUSE__JPEG_PAUSE_DPG_ACK_MASK,
-					UVD_DPG_PAUSE__JPEG_PAUSE_DPG_ACK_MASK, ret_code);
+					UVD_DPG_PAUSE__JPEG_PAUSE_DPG_ACK_MASK);
 				/* Restore */
 				ring = &adev->jpeg.inst->ring_dec;
@@ -1313,7 +1310,7 @@ static int vcn_v1_0_pause_dpg_mode(struct amdgpu_device *adev,
 					RREG32_SOC15(UVD, 0, mmUVD_SCRATCH2) & 0x7FFFFFFF);
 				SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS,
 					UVD_PGFSM_CONFIG__UVDM_UVDU_PWR_ON,
-					UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code);
+					UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
 			}
 		} else {
 			/* unpause dpg jpeg, no need to wait */
@@ -1336,10 +1333,10 @@ static bool vcn_v1_0_is_idle(void *handle)
 static int vcn_v1_0_wait_for_idle(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-	int ret = 0;
+	int ret;
-	SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_STATUS, UVD_STATUS__IDLE,
-		UVD_STATUS__IDLE, ret);
+	ret = SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_STATUS, UVD_STATUS__IDLE,
+		UVD_STATUS__IDLE);
 	return ret;
 }
...
@@ -697,7 +697,6 @@ static void vcn_v2_0_enable_clock_gating(struct amdgpu_device *adev)
 static void vcn_v2_0_disable_static_power_gating(struct amdgpu_device *adev)
 {
 	uint32_t data = 0;
-	int ret;
 	if (amdgpu_sriov_vf(adev))
 		return;
@@ -716,7 +715,7 @@ static void vcn_v2_0_disable_static_power_gating(struct amdgpu_device *adev)
 		WREG32_SOC15(VCN, 0, mmUVD_PGFSM_CONFIG, data);
 		SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_PGFSM_STATUS,
-			UVD_PGFSM_STATUS__UVDM_UVDU_PWR_ON_2_0, 0xFFFFF, ret);
+			UVD_PGFSM_STATUS__UVDM_UVDU_PWR_ON_2_0, 0xFFFFF);
 	} else {
 		data = (1 << UVD_PGFSM_CONFIG__UVDM_PWR_CONFIG__SHIFT
 			| 1 << UVD_PGFSM_CONFIG__UVDU_PWR_CONFIG__SHIFT
@@ -729,7 +728,7 @@ static void vcn_v2_0_disable_static_power_gating(struct amdgpu_device *adev)
 			| 1 << UVD_PGFSM_CONFIG__UVDTE_PWR_CONFIG__SHIFT
 			| 1 << UVD_PGFSM_CONFIG__UVDE_PWR_CONFIG__SHIFT);
 		WREG32_SOC15(VCN, 0, mmUVD_PGFSM_CONFIG, data);
-		SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_PGFSM_STATUS, 0, 0xFFFFF, ret);
+		SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_PGFSM_STATUS, 0, 0xFFFFF);
 	}
 	/* polling UVD_PGFSM_STATUS to confirm UVDM_PWR_STATUS,
@@ -747,7 +746,6 @@ static void vcn_v2_0_disable_static_power_gating(struct amdgpu_device *adev)
 static void vcn_v2_0_enable_static_power_gating(struct amdgpu_device *adev)
 {
 	uint32_t data = 0;
-	int ret;
 	if (amdgpu_sriov_vf(adev))
 		return;
@@ -783,7 +781,7 @@ static void vcn_v2_0_enable_static_power_gating(struct amdgpu_device *adev)
 			| 2 << UVD_PGFSM_STATUS__UVDTD_PWR_STATUS__SHIFT
 			| 2 << UVD_PGFSM_STATUS__UVDTE_PWR_STATUS__SHIFT
 			| 2 << UVD_PGFSM_STATUS__UVDE_PWR_STATUS__SHIFT);
-		SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_PGFSM_STATUS, data, 0xFFFFF, ret);
+		SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_PGFSM_STATUS, data, 0xFFFFF);
 	}
 }
@@ -1099,25 +1097,24 @@ static int vcn_v2_0_start(struct amdgpu_device *adev)
 static int vcn_v2_0_stop_dpg_mode(struct amdgpu_device *adev)
 {
-	int ret_code = 0;
 	uint32_t tmp;
 	/* Wait for power status to be 1 */
 	SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS, 1,
-		UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code);
+		UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
 	/* wait for read ptr to be equal to write ptr */
 	tmp = RREG32_SOC15(UVD, 0, mmUVD_RB_WPTR);
-	SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_RB_RPTR, tmp, 0xFFFFFFFF, ret_code);
+	SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_RB_RPTR, tmp, 0xFFFFFFFF);
 	tmp = RREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2);
-	SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_RB_RPTR2, tmp, 0xFFFFFFFF, ret_code);
+	SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_RB_RPTR2, tmp, 0xFFFFFFFF);
 	tmp = RREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR) & 0x7FFFFFFF;
-	SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_RBC_RB_RPTR, tmp, 0xFFFFFFFF, ret_code);
+	SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_RBC_RB_RPTR, tmp, 0xFFFFFFFF);
 	SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS, 1,
-		UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code);
+		UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
 	/* disable dynamic power gating mode */
 	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_POWER_STATUS), 0,
@@ -1139,7 +1136,7 @@ static int vcn_v2_0_stop(struct amdgpu_device *adev)
 	}
 	/* wait for uvd idle */
-	SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_STATUS, UVD_STATUS__IDLE, 0x7, r);
+	r = SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_STATUS, UVD_STATUS__IDLE, 0x7);
 	if (r)
 		return r;
@@ -1147,7 +1144,7 @@ static int vcn_v2_0_stop(struct amdgpu_device *adev)
 		UVD_LMI_STATUS__READ_CLEAN_MASK |
 		UVD_LMI_STATUS__WRITE_CLEAN_MASK |
 		UVD_LMI_STATUS__WRITE_CLEAN_RAW_MASK;
-	SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_LMI_STATUS, tmp, tmp, r);
+	r = SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_LMI_STATUS, tmp, tmp);
 	if (r)
 		return r;
@@ -1158,7 +1155,7 @@ static int vcn_v2_0_stop(struct amdgpu_device *adev)
 	tmp = UVD_LMI_STATUS__UMC_READ_CLEAN_RAW_MASK|
 		UVD_LMI_STATUS__UMC_WRITE_CLEAN_RAW_MASK;
-	SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_LMI_STATUS, tmp, tmp, r);
+	r = SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_LMI_STATUS, tmp, tmp);
 	if (r)
 		return r;
@@ -1209,9 +1206,8 @@ static int vcn_v2_0_pause_dpg_mode(struct amdgpu_device *adev,
 			(~UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK);
 		if (new_state->fw_based == VCN_DPG_STATE__PAUSE) {
-			ret_code = 0;
-			SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS, 0x1,
-				UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code);
+			ret_code = SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS, 0x1,
+				UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
 			if (!ret_code) {
 				volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst->fw_shared_cpu_addr;
@@ -1222,7 +1218,7 @@ static int vcn_v2_0_pause_dpg_mode(struct amdgpu_device *adev,
 				/* wait for ACK */
 				SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_DPG_PAUSE,
 					UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK,
-					UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK, ret_code);
+					UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK);
 				/* Stall DPG before WPTR/RPTR reset */
 				WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_POWER_STATUS),
@@ -1259,7 +1255,7 @@ static int vcn_v2_0_pause_dpg_mode(struct amdgpu_device *adev,
 				SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS,
 					UVD_PGFSM_CONFIG__UVDM_UVDU_PWR_ON,
-					UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code);
+					UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
 			}
 		} else {
 			/* unpause dpg, no need to wait */
@@ -1282,10 +1278,10 @@ static bool vcn_v2_0_is_idle(void *handle)
 static int vcn_v2_0_wait_for_idle(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-	int ret = 0;
+	int ret;
-	SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_STATUS, UVD_STATUS__IDLE,
-		UVD_STATUS__IDLE, ret);
+	ret = SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_STATUS, UVD_STATUS__IDLE,
+		UVD_STATUS__IDLE);
 	return ret;
 }
...
@@ -549,7 +549,6 @@ static void vcn_v2_5_mc_resume_dpg_mode(struct amdgpu_device *adev, int inst_idx
 static void vcn_v2_5_disable_clock_gating(struct amdgpu_device *adev)
 {
 	uint32_t data;
-	int ret = 0;
 	int i;
 	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
@@ -589,7 +588,7 @@ static void vcn_v2_5_disable_clock_gating(struct amdgpu_device *adev)
 		WREG32_SOC15(VCN, i, mmUVD_CGC_GATE, data);
-		SOC15_WAIT_ON_RREG(VCN, i, mmUVD_CGC_GATE, 0, 0xFFFFFFFF, ret);
+		SOC15_WAIT_ON_RREG(VCN, i, mmUVD_CGC_GATE, 0, 0xFFFFFFFF);
 		data = RREG32_SOC15(VCN, i, mmUVD_CGC_CTRL);
 		data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK
@@ -1302,25 +1301,24 @@ static int vcn_v2_5_sriov_start(struct amdgpu_device *adev)
 static int vcn_v2_5_stop_dpg_mode(struct amdgpu_device *adev, int inst_idx)
 {
-	int ret_code = 0;
 	uint32_t tmp;
 	/* Wait for power status to be 1 */
 	SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_POWER_STATUS, 1,
-		UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code);
+		UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
 	/* wait for read ptr to be equal to write ptr */
 	tmp = RREG32_SOC15(VCN, inst_idx, mmUVD_RB_WPTR);
-	SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_RB_RPTR, tmp, 0xFFFFFFFF, ret_code);
+	SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_RB_RPTR, tmp, 0xFFFFFFFF);
 	tmp = RREG32_SOC15(VCN, inst_idx, mmUVD_RB_WPTR2);
-	SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_RB_RPTR2, tmp, 0xFFFFFFFF, ret_code);
+	SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_RB_RPTR2, tmp, 0xFFFFFFFF);
 	tmp = RREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_WPTR) & 0x7FFFFFFF;
-	SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_RBC_RB_RPTR, tmp, 0xFFFFFFFF, ret_code);
+	SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_RBC_RB_RPTR, tmp, 0xFFFFFFFF);
 	SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_POWER_STATUS, 1,
-		UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code);
+		UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
 	/* disable dynamic power gating mode */
 	WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS), 0,
@@ -1343,7 +1341,7 @@ static int vcn_v2_5_stop(struct amdgpu_device *adev)
 		}
 		/* wait for vcn idle */
-		SOC15_WAIT_ON_RREG(VCN, i, mmUVD_STATUS, UVD_STATUS__IDLE, 0x7, r);
+		r = SOC15_WAIT_ON_RREG(VCN, i, mmUVD_STATUS, UVD_STATUS__IDLE, 0x7);
 		if (r)
 			return r;
@@ -1351,7 +1349,7 @@ static int vcn_v2_5_stop(struct amdgpu_device *adev)
 			UVD_LMI_STATUS__READ_CLEAN_MASK |
 			UVD_LMI_STATUS__WRITE_CLEAN_MASK |
 			UVD_LMI_STATUS__WRITE_CLEAN_RAW_MASK;
-		SOC15_WAIT_ON_RREG(VCN, i, mmUVD_LMI_STATUS, tmp, tmp, r);
+		r = SOC15_WAIT_ON_RREG(VCN, i, mmUVD_LMI_STATUS, tmp, tmp);
 		if (r)
 			return r;
@@ -1362,7 +1360,7 @@ static int vcn_v2_5_stop(struct amdgpu_device *adev)
 		tmp = UVD_LMI_STATUS__UMC_READ_CLEAN_RAW_MASK|
 			UVD_LMI_STATUS__UMC_WRITE_CLEAN_RAW_MASK;
-		SOC15_WAIT_ON_RREG(VCN, i, mmUVD_LMI_STATUS, tmp, tmp, r);
+		r = SOC15_WAIT_ON_RREG(VCN, i, mmUVD_LMI_STATUS, tmp, tmp);
 		if (r)
 			return r;
@@ -1412,8 +1410,8 @@ static int vcn_v2_5_pause_dpg_mode(struct amdgpu_device *adev,
 			(~UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK);
 		if (new_state->fw_based == VCN_DPG_STATE__PAUSE) {
-			SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_POWER_STATUS, 0x1,
-				UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code);
+			ret_code = SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_POWER_STATUS, 0x1,
+				UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
 			if (!ret_code) {
 				volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst[inst_idx].fw_shared_cpu_addr;
@@ -1425,7 +1423,7 @@ static int vcn_v2_5_pause_dpg_mode(struct amdgpu_device *adev,
 				/* wait for ACK */
 				SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_DPG_PAUSE,
 					UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK,
-					UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK, ret_code);
+					UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK);
 				/* Stall DPG before WPTR/RPTR reset */
 				WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS),
@@ -1458,13 +1456,13 @@ static int vcn_v2_5_pause_dpg_mode(struct amdgpu_device *adev,
 					0, ~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK);
 				SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_POWER_STATUS,
-					UVD_PGFSM_CONFIG__UVDM_UVDU_PWR_ON, UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code);
+					UVD_PGFSM_CONFIG__UVDM_UVDU_PWR_ON, UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
 			}
 		} else {
 			reg_data &= ~UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK;
 			WREG32_SOC15(VCN, inst_idx, mmUVD_DPG_PAUSE, reg_data);
 			SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_POWER_STATUS, 0x1,
-				UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code);
+				UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
 		}
 		adev->vcn.inst[inst_idx].pause_state.fw_based = new_state->fw_based;
 	}
@@ -1701,8 +1699,8 @@ static int vcn_v2_5_wait_for_idle(void *handle)
 	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
 		if (adev->vcn.harvest_config & (1 << i))
 			continue;
-		SOC15_WAIT_ON_RREG(VCN, i, mmUVD_STATUS, UVD_STATUS__IDLE,
-			UVD_STATUS__IDLE, ret);
+		ret = SOC15_WAIT_ON_RREG(VCN, i, mmUVD_STATUS, UVD_STATUS__IDLE,
+			UVD_STATUS__IDLE);
 		if (ret)
 			return ret;
 	}
...
@@ -482,7 +482,6 @@ static void vcn_v3_0_mc_resume_dpg_mode(struct amdgpu_device *adev, int inst_idx
 static void vcn_v3_0_disable_static_power_gating(struct amdgpu_device *adev, int inst)
 {
 	uint32_t data = 0;
-	int ret;
 	if (adev->pg_flags & AMD_PG_SUPPORT_VCN) {
 		data = (1 << UVD_PGFSM_CONFIG__UVDM_PWR_CONFIG__SHIFT
@@ -502,7 +501,7 @@ static void vcn_v3_0_disable_static_power_gating(struct amdgpu_device *adev, int
 		WREG32_SOC15(VCN, inst, mmUVD_PGFSM_CONFIG, data);
 		SOC15_WAIT_ON_RREG(VCN, inst, mmUVD_PGFSM_STATUS,
-			UVD_PGFSM_STATUS__UVDM_UVDU_UVDLM_PWR_ON_3_0, 0x3F3FFFFF, ret);
+			UVD_PGFSM_STATUS__UVDM_UVDU_UVDLM_PWR_ON_3_0, 0x3F3FFFFF);
 	} else {
 		data = (1 << UVD_PGFSM_CONFIG__UVDM_PWR_CONFIG__SHIFT
 			| 1 << UVD_PGFSM_CONFIG__UVDU_PWR_CONFIG__SHIFT
@@ -519,7 +518,7 @@ static void vcn_v3_0_disable_static_power_gating(struct amdgpu_device *adev, int
 			| 1 << UVD_PGFSM_CONFIG__UVDNA_PWR_CONFIG__SHIFT
 			| 1 << UVD_PGFSM_CONFIG__UVDNB_PWR_CONFIG__SHIFT);
 		WREG32_SOC15(VCN, inst, mmUVD_PGFSM_CONFIG, data);
-		SOC15_WAIT_ON_RREG(VCN, inst, mmUVD_PGFSM_STATUS, 0, 0x3F3FFFFF, ret);
+		SOC15_WAIT_ON_RREG(VCN, inst, mmUVD_PGFSM_STATUS, 0, 0x3F3FFFFF);
 	}
 	data = RREG32_SOC15(VCN, inst, mmUVD_POWER_STATUS);
@@ -534,7 +533,6 @@ static void vcn_v3_0_disable_static_power_gating(struct amdgpu_device *adev, int
 static void vcn_v3_0_enable_static_power_gating(struct amdgpu_device *adev, int inst)
 {
 	uint32_t data;
-	int ret;
 	if (adev->pg_flags & AMD_PG_SUPPORT_VCN) {
 		/* Before power off, this indicator has to be turned on */
@@ -573,7 +571,7 @@ static void vcn_v3_0_enable_static_power_gating(struct amdgpu_device *adev, int
 			| 2 << UVD_PGFSM_STATUS__UVDATD_PWR_STATUS__SHIFT
 			| 2 << UVD_PGFSM_STATUS__UVDNA_PWR_STATUS__SHIFT
 			| 2 << UVD_PGFSM_STATUS__UVDNB_PWR_STATUS__SHIFT);
-		SOC15_WAIT_ON_RREG(VCN, inst, mmUVD_PGFSM_STATUS, data, 0x3F3FFFFF, ret);
+		SOC15_WAIT_ON_RREG(VCN, inst, mmUVD_PGFSM_STATUS, data, 0x3F3FFFFF);
 	}
 }
@@ -588,7 +586,6 @@ static void vcn_v3_0_enable_static_power_gating(struct amdgpu_device *adev, int
 static void vcn_v3_0_disable_clock_gating(struct amdgpu_device *adev, int inst)
 {
 	uint32_t data;
-	int ret = 0;
 	/* VCN disable CGC */
 	data = RREG32_SOC15(VCN, inst, mmUVD_CGC_CTRL);
@@ -624,7 +621,7 @@ static void vcn_v3_0_disable_clock_gating(struct amdgpu_device *adev, int inst)
 	WREG32_SOC15(VCN, inst, mmUVD_CGC_GATE, data);
-	SOC15_WAIT_ON_RREG(VCN, inst, mmUVD_CGC_GATE, 0, 0xFFFFFFFF, ret);
+	SOC15_WAIT_ON_RREG(VCN, inst, mmUVD_CGC_GATE, 0, 0xFFFFFFFF);
 	data = RREG32_SOC15(VCN, inst, mmUVD_CGC_CTRL);
 	data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK
@@ -1142,25 +1139,24 @@ static int vcn_v3_0_start(struct amdgpu_device *adev)
 static int vcn_v3_0_stop_dpg_mode(struct amdgpu_device *adev, int inst_idx)
 {
-	int ret_code = 0;
 	uint32_t tmp;
 	/* Wait for power status to be 1 */
 	SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_POWER_STATUS, 1,
-		UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code);
+		UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
 	/* wait for read ptr to be equal to write ptr */
 	tmp = RREG32_SOC15(VCN, inst_idx, mmUVD_RB_WPTR);
-	SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_RB_RPTR, tmp, 0xFFFFFFFF, ret_code);
+	SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_RB_RPTR, tmp, 0xFFFFFFFF);
 	tmp = RREG32_SOC15(VCN, inst_idx, mmUVD_RB_WPTR2);
-	SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_RB_RPTR2, tmp, 0xFFFFFFFF, ret_code);
+	SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_RB_RPTR2, tmp, 0xFFFFFFFF);
 	tmp = RREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_WPTR) & 0x7FFFFFFF;
-	SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_RBC_RB_RPTR, tmp, 0xFFFFFFFF, ret_code);
+	SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_RBC_RB_RPTR, tmp, 0xFFFFFFFF);
 	SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_POWER_STATUS, 1,
-		UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code);
+		UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
 	/* disable dynamic power gating mode */
 	WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS), 0,
@@ -1184,7 +1180,7 @@ static int vcn_v3_0_stop(struct amdgpu_device *adev)
 		}
 		/* wait for vcn idle */
-		SOC15_WAIT_ON_RREG(VCN, i, mmUVD_STATUS, UVD_STATUS__IDLE, 0x7, r);
+		r = SOC15_WAIT_ON_RREG(VCN, i, mmUVD_STATUS, UVD_STATUS__IDLE, 0x7);
 		if (r)
 			return r;
@@ -1192,7 +1188,7 @@ static int vcn_v3_0_stop(struct amdgpu_device *adev)
 			UVD_LMI_STATUS__READ_CLEAN_MASK |
 			UVD_LMI_STATUS__WRITE_CLEAN_MASK |
 			UVD_LMI_STATUS__WRITE_CLEAN_RAW_MASK;
-		SOC15_WAIT_ON_RREG(VCN, i, mmUVD_LMI_STATUS, tmp, tmp, r);
+		r = SOC15_WAIT_ON_RREG(VCN, i, mmUVD_LMI_STATUS, tmp, tmp);
 		if (r)
 			return r;
@@ -1202,7 +1198,7 @@ static int vcn_v3_0_stop(struct amdgpu_device *adev)
 		WREG32_SOC15(VCN, i, mmUVD_LMI_CTRL2, tmp);
 		tmp = UVD_LMI_STATUS__UMC_READ_CLEAN_RAW_MASK|
 			UVD_LMI_STATUS__UMC_WRITE_CLEAN_RAW_MASK;
-		SOC15_WAIT_ON_RREG(VCN, i, mmUVD_LMI_STATUS, tmp, tmp, r);
+		r = SOC15_WAIT_ON_RREG(VCN, i, mmUVD_LMI_STATUS, tmp, tmp);
 		if (r)
 			return r;
@@ -1259,9 +1255,8 @@ static int vcn_v3_0_pause_dpg_mode(struct amdgpu_device *adev,
 			(~UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK);
 		if (new_state->fw_based == VCN_DPG_STATE__PAUSE) {
-			ret_code = 0;
-			SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_POWER_STATUS, 0x1,
-				UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code);
+			ret_code = SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_POWER_STATUS, 0x1,
+				UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
 			if (!ret_code) {
 				/* pause DPG */
@@ -1271,7 +1266,7 @@ static int vcn_v3_0_pause_dpg_mode(struct amdgpu_device *adev,
 				/* wait for ACK */
 				SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_DPG_PAUSE,
 					UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK,
-					UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK, ret_code);
+					UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK);
 				/* Restore */
 				ring = &adev->vcn.inst[inst_idx].ring_enc[0];
@@ -1292,7 +1287,7 @@ static int vcn_v3_0_pause_dpg_mode(struct amdgpu_device *adev,
 					RREG32_SOC15(VCN, inst_idx, mmUVD_SCRATCH2) & 0x7FFFFFFF);
 				SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_POWER_STATUS,
-					UVD_PGFSM_CONFIG__UVDM_UVDU_PWR_ON, UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code);
+					UVD_PGFSM_CONFIG__UVDM_UVDU_PWR_ON, UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
 			}
 		} else {
 			/* unpause dpg, no need to wait */
@@ -1542,8 +1537,8 @@ static int vcn_v3_0_wait_for_idle(void *handle)
 		if (adev->vcn.harvest_config & (1 << i))
 			continue;
-		SOC15_WAIT_ON_RREG(VCN, i, mmUVD_STATUS, UVD_STATUS__IDLE,
-			UVD_STATUS__IDLE, ret);
+		ret = SOC15_WAIT_ON_RREG(VCN, i, mmUVD_STATUS, UVD_STATUS__IDLE,
+			UVD_STATUS__IDLE);
 		if (ret)
 			return ret;
 	}
...