Commit 21226f02 authored by Tao Zhou, committed by Alex Deucher

drm/amdgpu: replace reset_error_count with amdgpu_ras_reset_error_count

Simplify the code.
Signed-off-by: Tao Zhou <tao.zhou1@amd.com>
Reviewed-by: Hawking Zhang <Hawking.Zhang@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent 9d7a965e
...@@ -3578,9 +3578,7 @@ static void amdgpu_device_xgmi_reset_func(struct work_struct *__work) ...@@ -3578,9 +3578,7 @@ static void amdgpu_device_xgmi_reset_func(struct work_struct *__work)
if (adev->asic_reset_res) if (adev->asic_reset_res)
goto fail; goto fail;
if (adev->mmhub.ras && adev->mmhub.ras->ras_block.hw_ops && amdgpu_ras_reset_error_count(adev, AMDGPU_RAS_BLOCK__MMHUB);
adev->mmhub.ras->ras_block.hw_ops->reset_ras_error_count)
adev->mmhub.ras->ras_block.hw_ops->reset_ras_error_count(adev);
} else { } else {
task_barrier_full(&hive->tb); task_barrier_full(&hive->tb);
...@@ -5201,9 +5199,7 @@ int amdgpu_do_asic_reset(struct list_head *device_list_handle, ...@@ -5201,9 +5199,7 @@ int amdgpu_do_asic_reset(struct list_head *device_list_handle,
if (!r && amdgpu_ras_intr_triggered()) { if (!r && amdgpu_ras_intr_triggered()) {
list_for_each_entry(tmp_adev, device_list_handle, reset_list) { list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
if (tmp_adev->mmhub.ras && tmp_adev->mmhub.ras->ras_block.hw_ops && amdgpu_ras_reset_error_count(tmp_adev, AMDGPU_RAS_BLOCK__MMHUB);
tmp_adev->mmhub.ras->ras_block.hw_ops->reset_ras_error_count)
tmp_adev->mmhub.ras->ras_block.hw_ops->reset_ras_error_count(tmp_adev);
} }
amdgpu_ras_intr_cleared(); amdgpu_ras_intr_cleared();
......
...@@ -908,7 +908,7 @@ static int amdgpu_xgmi_ras_late_init(struct amdgpu_device *adev, struct ras_comm ...@@ -908,7 +908,7 @@ static int amdgpu_xgmi_ras_late_init(struct amdgpu_device *adev, struct ras_comm
adev->gmc.xgmi.num_physical_nodes == 0) adev->gmc.xgmi.num_physical_nodes == 0)
return 0; return 0;
adev->gmc.xgmi.ras->ras_block.hw_ops->reset_ras_error_count(adev); amdgpu_ras_reset_error_count(adev, AMDGPU_RAS_BLOCK__XGMI_WAFL);
return amdgpu_ras_block_late_init(adev, ras_block); return amdgpu_ras_block_late_init(adev, ras_block);
} }
...@@ -1075,7 +1075,7 @@ static void amdgpu_xgmi_query_ras_error_count(struct amdgpu_device *adev, ...@@ -1075,7 +1075,7 @@ static void amdgpu_xgmi_query_ras_error_count(struct amdgpu_device *adev,
break; break;
} }
adev->gmc.xgmi.ras->ras_block.hw_ops->reset_ras_error_count(adev); amdgpu_ras_reset_error_count(adev, AMDGPU_RAS_BLOCK__XGMI_WAFL);
err_data->ue_count += ue_cnt; err_data->ue_count += ue_cnt;
err_data->ce_count += ce_cnt; err_data->ce_count += ce_cnt;
......
...@@ -1587,13 +1587,8 @@ static int gmc_v9_0_late_init(void *handle) ...@@ -1587,13 +1587,8 @@ static int gmc_v9_0_late_init(void *handle)
} }
if (!amdgpu_persistent_edc_harvesting_supported(adev)) { if (!amdgpu_persistent_edc_harvesting_supported(adev)) {
if (adev->mmhub.ras && adev->mmhub.ras->ras_block.hw_ops && amdgpu_ras_reset_error_count(adev, AMDGPU_RAS_BLOCK__MMHUB);
adev->mmhub.ras->ras_block.hw_ops->reset_ras_error_count) amdgpu_ras_reset_error_count(adev, AMDGPU_RAS_BLOCK__HDP);
adev->mmhub.ras->ras_block.hw_ops->reset_ras_error_count(adev);
if (adev->hdp.ras && adev->hdp.ras->ras_block.hw_ops &&
adev->hdp.ras->ras_block.hw_ops->reset_ras_error_count)
adev->hdp.ras->ras_block.hw_ops->reset_ras_error_count(adev);
} }
r = amdgpu_gmc_ras_late_init(adev); r = amdgpu_gmc_ras_late_init(adev);
......
...@@ -1749,11 +1749,8 @@ static int sdma_v4_0_late_init(void *handle) ...@@ -1749,11 +1749,8 @@ static int sdma_v4_0_late_init(void *handle)
sdma_v4_0_setup_ulv(adev); sdma_v4_0_setup_ulv(adev);
if (!amdgpu_persistent_edc_harvesting_supported(adev)) { if (!amdgpu_persistent_edc_harvesting_supported(adev))
if (adev->sdma.ras && adev->sdma.ras->ras_block.hw_ops && amdgpu_ras_reset_error_count(adev, AMDGPU_RAS_BLOCK__SDMA);
adev->sdma.ras->ras_block.hw_ops->reset_ras_error_count)
adev->sdma.ras->ras_block.hw_ops->reset_ras_error_count(adev);
}
return 0; return 0;
} }
......
...@@ -1276,11 +1276,8 @@ static int sdma_v4_4_2_late_init(void *handle) ...@@ -1276,11 +1276,8 @@ static int sdma_v4_4_2_late_init(void *handle)
.cb = sdma_v4_4_2_process_ras_data_cb, .cb = sdma_v4_4_2_process_ras_data_cb,
}; };
#endif #endif
if (!amdgpu_persistent_edc_harvesting_supported(adev)) { if (!amdgpu_persistent_edc_harvesting_supported(adev))
if (adev->sdma.ras && adev->sdma.ras->ras_block.hw_ops && amdgpu_ras_reset_error_count(adev, AMDGPU_RAS_BLOCK__SDMA);
adev->sdma.ras->ras_block.hw_ops->reset_ras_error_count)
adev->sdma.ras->ras_block.hw_ops->reset_ras_error_count(adev);
}
return 0; return 0;
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment