Commit caae42f0 authored by yipechai's avatar yipechai Committed by Alex Deucher

drm/amdgpu: Optimize xxx_ras_late_init function of each ras block

1. Move accesses to ras block instance members out of the module-internal
   functions and up into the top-level callers of xxx_ras_late_init.
2. Module-internal functions may now use only the parameter variables
   passed to xxx_ras_late_init instead of ras block instance members.
Signed-off-by: yipechai <YiPeng.Chai@amd.com>
Reviewed-by: Tao Zhou <tao.zhou1@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent 20c43547
...@@ -625,11 +625,11 @@ int amdgpu_get_gfx_off_status(struct amdgpu_device *adev, uint32_t *value) ...@@ -625,11 +625,11 @@ int amdgpu_get_gfx_off_status(struct amdgpu_device *adev, uint32_t *value)
int amdgpu_gfx_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block) int amdgpu_gfx_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block)
{ {
int r; int r;
r = amdgpu_ras_block_late_init(adev, adev->gfx.ras_if); r = amdgpu_ras_block_late_init(adev, ras_block);
if (r) if (r)
return r; return r;
if (amdgpu_ras_is_supported(adev, adev->gfx.ras_if->block)) { if (amdgpu_ras_is_supported(adev, ras_block->block)) {
if (!amdgpu_persistent_edc_harvesting_supported(adev)) if (!amdgpu_persistent_edc_harvesting_supported(adev))
amdgpu_ras_reset_error_status(adev, AMDGPU_RAS_BLOCK__GFX); amdgpu_ras_reset_error_status(adev, AMDGPU_RAS_BLOCK__GFX);
...@@ -640,7 +640,7 @@ int amdgpu_gfx_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *r ...@@ -640,7 +640,7 @@ int amdgpu_gfx_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *r
return 0; return 0;
late_fini: late_fini:
amdgpu_ras_block_late_fini(adev, adev->gfx.ras_if); amdgpu_ras_block_late_fini(adev, ras_block);
return r; return r;
} }
......
...@@ -452,7 +452,7 @@ int amdgpu_gmc_ras_late_init(struct amdgpu_device *adev) ...@@ -452,7 +452,7 @@ int amdgpu_gmc_ras_late_init(struct amdgpu_device *adev)
int r; int r;
if (adev->umc.ras && adev->umc.ras->ras_block.ras_late_init) { if (adev->umc.ras && adev->umc.ras->ras_block.ras_late_init) {
r = adev->umc.ras->ras_block.ras_late_init(adev, NULL); r = adev->umc.ras->ras_block.ras_late_init(adev, adev->umc.ras_if);
if (r) if (r)
return r; return r;
} }
...@@ -464,7 +464,7 @@ int amdgpu_gmc_ras_late_init(struct amdgpu_device *adev) ...@@ -464,7 +464,7 @@ int amdgpu_gmc_ras_late_init(struct amdgpu_device *adev)
} }
if (adev->gmc.xgmi.ras && adev->gmc.xgmi.ras->ras_block.ras_late_init) { if (adev->gmc.xgmi.ras && adev->gmc.xgmi.ras->ras_block.ras_late_init) {
r = adev->gmc.xgmi.ras->ras_block.ras_late_init(adev, NULL); r = adev->gmc.xgmi.ras->ras_block.ras_late_init(adev, adev->gmc.xgmi.ras_if);
if (r) if (r)
return r; return r;
} }
......
...@@ -25,11 +25,11 @@ ...@@ -25,11 +25,11 @@
int amdgpu_nbio_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block) int amdgpu_nbio_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block)
{ {
int r; int r;
r = amdgpu_ras_block_late_init(adev, adev->nbio.ras_if); r = amdgpu_ras_block_late_init(adev, ras_block);
if (r) if (r)
return r; return r;
if (amdgpu_ras_is_supported(adev, adev->nbio.ras_if->block)) { if (amdgpu_ras_is_supported(adev, ras_block->block)) {
r = amdgpu_irq_get(adev, &adev->nbio.ras_controller_irq, 0); r = amdgpu_irq_get(adev, &adev->nbio.ras_controller_irq, 0);
if (r) if (r)
goto late_fini; goto late_fini;
...@@ -40,7 +40,7 @@ int amdgpu_nbio_ras_late_init(struct amdgpu_device *adev, struct ras_common_if * ...@@ -40,7 +40,7 @@ int amdgpu_nbio_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *
return 0; return 0;
late_fini: late_fini:
amdgpu_ras_block_late_fini(adev, adev->nbio.ras_if); amdgpu_ras_block_late_fini(adev, ras_block);
return r; return r;
} }
......
...@@ -91,11 +91,11 @@ int amdgpu_sdma_ras_late_init(struct amdgpu_device *adev, ...@@ -91,11 +91,11 @@ int amdgpu_sdma_ras_late_init(struct amdgpu_device *adev,
{ {
int r, i; int r, i;
r = amdgpu_ras_block_late_init(adev, adev->sdma.ras_if); r = amdgpu_ras_block_late_init(adev, ras_block);
if (r) if (r)
return r; return r;
if (amdgpu_ras_is_supported(adev, adev->sdma.ras_if->block)) { if (amdgpu_ras_is_supported(adev, ras_block->block)) {
for (i = 0; i < adev->sdma.num_instances; i++) { for (i = 0; i < adev->sdma.num_instances; i++) {
r = amdgpu_irq_get(adev, &adev->sdma.ecc_irq, r = amdgpu_irq_get(adev, &adev->sdma.ecc_irq,
AMDGPU_SDMA_IRQ_INSTANCE0 + i); AMDGPU_SDMA_IRQ_INSTANCE0 + i);
...@@ -107,7 +107,7 @@ int amdgpu_sdma_ras_late_init(struct amdgpu_device *adev, ...@@ -107,7 +107,7 @@ int amdgpu_sdma_ras_late_init(struct amdgpu_device *adev,
return 0; return 0;
late_fini: late_fini:
amdgpu_ras_block_late_fini(adev, adev->sdma.ras_if); amdgpu_ras_block_late_fini(adev, ras_block);
return r; return r;
} }
......
...@@ -140,11 +140,11 @@ int amdgpu_umc_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *r ...@@ -140,11 +140,11 @@ int amdgpu_umc_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *r
{ {
int r; int r;
r = amdgpu_ras_block_late_init(adev, adev->umc.ras_if); r = amdgpu_ras_block_late_init(adev, ras_block);
if (r) if (r)
return r; return r;
if (amdgpu_ras_is_supported(adev, adev->umc.ras_if->block)) { if (amdgpu_ras_is_supported(adev, ras_block->block)) {
r = amdgpu_irq_get(adev, &adev->gmc.ecc_irq, 0); r = amdgpu_irq_get(adev, &adev->gmc.ecc_irq, 0);
if (r) if (r)
goto late_fini; goto late_fini;
...@@ -158,7 +158,7 @@ int amdgpu_umc_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *r ...@@ -158,7 +158,7 @@ int amdgpu_umc_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *r
return 0; return 0;
late_fini: late_fini:
amdgpu_ras_block_late_fini(adev, adev->umc.ras_if); amdgpu_ras_block_late_fini(adev, ras_block);
return r; return r;
} }
......
...@@ -740,7 +740,7 @@ static int amdgpu_xgmi_ras_late_init(struct amdgpu_device *adev, struct ras_comm ...@@ -740,7 +740,7 @@ static int amdgpu_xgmi_ras_late_init(struct amdgpu_device *adev, struct ras_comm
adev->gmc.xgmi.ras->ras_block.hw_ops->reset_ras_error_count(adev); adev->gmc.xgmi.ras->ras_block.hw_ops->reset_ras_error_count(adev);
return amdgpu_ras_block_late_init(adev, adev->gmc.xgmi.ras_if); return amdgpu_ras_block_late_init(adev, ras_block);
} }
static void amdgpu_xgmi_ras_fini(struct amdgpu_device *adev) static void amdgpu_xgmi_ras_fini(struct amdgpu_device *adev)
......
...@@ -4792,7 +4792,7 @@ static int gfx_v9_0_ecc_late_init(void *handle) ...@@ -4792,7 +4792,7 @@ static int gfx_v9_0_ecc_late_init(void *handle)
return r; return r;
if (adev->gfx.ras && adev->gfx.ras->ras_block.ras_late_init) { if (adev->gfx.ras && adev->gfx.ras->ras_block.ras_late_init) {
r = adev->gfx.ras->ras_block.ras_late_init(adev, NULL); r = adev->gfx.ras->ras_block.ras_late_init(adev, adev->gfx.ras_if);
if (r) if (r)
return r; return r;
} }
......
...@@ -1895,7 +1895,7 @@ static int sdma_v4_0_late_init(void *handle) ...@@ -1895,7 +1895,7 @@ static int sdma_v4_0_late_init(void *handle)
} }
if (adev->sdma.ras && adev->sdma.ras->ras_block.ras_late_init) if (adev->sdma.ras && adev->sdma.ras->ras_block.ras_late_init)
return adev->sdma.ras->ras_block.ras_late_init(adev, NULL); return adev->sdma.ras->ras_block.ras_late_init(adev, adev->sdma.ras_if);
else else
return 0; return 0;
} }
......
...@@ -1195,7 +1195,7 @@ static int soc15_common_late_init(void *handle) ...@@ -1195,7 +1195,7 @@ static int soc15_common_late_init(void *handle)
xgpu_ai_mailbox_get_irq(adev); xgpu_ai_mailbox_get_irq(adev);
if (adev->nbio.ras && adev->nbio.ras->ras_block.ras_late_init) if (adev->nbio.ras && adev->nbio.ras->ras_block.ras_late_init)
r = adev->nbio.ras->ras_block.ras_late_init(adev, NULL); r = adev->nbio.ras->ras_block.ras_late_init(adev, adev->nbio.ras_if);
return r; return r;
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment