Commit e702a680 authored by Chunming Zhou, committed by Alex Deucher

drm/amdgpu: implement sdma3 check/pre/post_soft_reset

Signed-off-by: Chunming Zhou <David1.Zhou@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent e4ae0fc3
@@ -1733,6 +1733,7 @@ struct amdgpu_sdma {
 	struct amdgpu_irq_src	trap_irq;
 	struct amdgpu_irq_src	illegal_inst_irq;
 	int			num_instances;
+	uint32_t                    srbm_soft_reset;
 };
 
 /*
@@ -1978,7 +1978,6 @@ static bool amdgpu_need_full_reset(struct amdgpu_device *adev)
 	if (adev->ip_block_status[AMD_IP_BLOCK_TYPE_GMC].hang ||
 	    adev->ip_block_status[AMD_IP_BLOCK_TYPE_IH].hang ||
 	    adev->ip_block_status[AMD_IP_BLOCK_TYPE_SMC].hang ||
-	    adev->ip_block_status[AMD_IP_BLOCK_TYPE_SDMA].hang ||
 	    adev->ip_block_status[AMD_IP_BLOCK_TYPE_UVD].hang ||
 	    adev->ip_block_status[AMD_IP_BLOCK_TYPE_VCE].hang ||
 	    adev->ip_block_status[AMD_IP_BLOCK_TYPE_ACP].hang ||
@@ -1320,27 +1320,78 @@ static int sdma_v3_0_wait_for_idle(void *handle)
 	return -ETIMEDOUT;
 }
 
-static int sdma_v3_0_soft_reset(void *handle)
+static int sdma_v3_0_check_soft_reset(void *handle)
 {
-	u32 srbm_soft_reset = 0;
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	u32 srbm_soft_reset = 0;
 	u32 tmp = RREG32(mmSRBM_STATUS2);
 
-	if (tmp & SRBM_STATUS2__SDMA_BUSY_MASK) {
-		/* sdma0 */
-		tmp = RREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET);
-		tmp = REG_SET_FIELD(tmp, SDMA0_F32_CNTL, HALT, 0);
-		WREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET, tmp);
+	if ((tmp & SRBM_STATUS2__SDMA_BUSY_MASK) ||
+	    (tmp & SRBM_STATUS2__SDMA1_BUSY_MASK)) {
 		srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_SDMA_MASK;
-	}
-	if (tmp & SRBM_STATUS2__SDMA1_BUSY_MASK) {
-		/* sdma1 */
-		tmp = RREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET);
-		tmp = REG_SET_FIELD(tmp, SDMA0_F32_CNTL, HALT, 0);
-		WREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET, tmp);
 		srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_SDMA1_MASK;
 	}
 
+	if (srbm_soft_reset) {
+		adev->ip_block_status[AMD_IP_BLOCK_TYPE_SDMA].hang = true;
+		adev->sdma.srbm_soft_reset = srbm_soft_reset;
+	} else {
+		adev->ip_block_status[AMD_IP_BLOCK_TYPE_SDMA].hang = false;
+		adev->sdma.srbm_soft_reset = 0;
+	}
+
+	return 0;
+}
+
+static int sdma_v3_0_pre_soft_reset(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	u32 srbm_soft_reset = 0;
+
+	if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_SDMA].hang)
+		return 0;
+
+	srbm_soft_reset = adev->sdma.srbm_soft_reset;
+
+	if (REG_GET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_SDMA) ||
+	    REG_GET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_SDMA1)) {
+		sdma_v3_0_ctx_switch_enable(adev, false);
+		sdma_v3_0_enable(adev, false);
+	}
+
+	return 0;
+}
+
+static int sdma_v3_0_post_soft_reset(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	u32 srbm_soft_reset = 0;
+
+	if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_SDMA].hang)
+		return 0;
+
+	srbm_soft_reset = adev->sdma.srbm_soft_reset;
+
+	if (REG_GET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_SDMA) ||
+	    REG_GET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_SDMA1)) {
+		sdma_v3_0_gfx_resume(adev);
+		sdma_v3_0_rlc_resume(adev);
+	}
+
+	return 0;
+}
+
+static int sdma_v3_0_soft_reset(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	u32 srbm_soft_reset = 0;
+	u32 tmp;
+
+	if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_SDMA].hang)
+		return 0;
+
+	srbm_soft_reset = adev->sdma.srbm_soft_reset;
+
 	if (srbm_soft_reset) {
 		tmp = RREG32(mmSRBM_SOFT_RESET);
 		tmp |= srbm_soft_reset;
@@ -1559,6 +1610,9 @@ const struct amd_ip_funcs sdma_v3_0_ip_funcs = {
 	.resume = sdma_v3_0_resume,
 	.is_idle = sdma_v3_0_is_idle,
 	.wait_for_idle = sdma_v3_0_wait_for_idle,
+	.check_soft_reset = sdma_v3_0_check_soft_reset,
+	.pre_soft_reset = sdma_v3_0_pre_soft_reset,
+	.post_soft_reset = sdma_v3_0_post_soft_reset,
 	.soft_reset = sdma_v3_0_soft_reset,
 	.set_clockgating_state = sdma_v3_0_set_clockgating_state,
 	.set_powergating_state = sdma_v3_0_set_powergating_state,
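
For context, a minimal sketch of the order in which a reset path would drive the four hooks this patch wires into sdma_v3_0_ip_funcs. It is illustrative only: try_sdma_soft_reset() is a hypothetical helper, not part of this patch, and the real driver walks adev->ip_blocks and may escalate to a full GPU reset instead of a per-block soft reset.

/* Illustrative sketch (not from this patch): how check/pre/soft/post_soft_reset
 * fit together for the SDMA block. Assumes the usual amdgpu driver context
 * ("amdgpu.h"); the helper name is hypothetical.
 */
static int try_sdma_soft_reset(struct amdgpu_device *adev,
			       const struct amd_ip_funcs *funcs)
{
	int r;

	/* 1. Detect a hang: reads SRBM_STATUS2, sets the SDMA .hang flag and
	 *    caches the reset mask in adev->sdma.srbm_soft_reset. */
	r = funcs->check_soft_reset(adev);
	if (r)
		return r;

	if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_SDMA].hang)
		return 0;	/* engines are idle, nothing to reset */

	/* 2. Quiesce: disable context switching and halt both SDMA engines. */
	r = funcs->pre_soft_reset(adev);
	if (r)
		return r;

	/* 3. Pulse SRBM_SOFT_RESET for the hung instance(s). */
	r = funcs->soft_reset(adev);
	if (r)
		return r;

	/* 4. Recover: re-run the gfx and RLC ring resume paths. */
	return funcs->post_soft_reset(adev);
}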