Commit 232f2431 authored by Guchun Chen, committed by Alex Deucher

drm/amdgpu/gfx: set sched.ready status after ring/IB test in gfx

sched.ready has nothing to do with ring initialization; it needs to be
set to true after the ring/IB test in amdgpu_ring_test_helper to indicate
that the ring is ready for submission.
Signed-off-by: Guchun Chen <guchun.chen@amd.com>
Acked-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent 61c31b8b
...@@ -6073,7 +6073,6 @@ static int gfx_v10_0_cp_gfx_resume(struct amdgpu_device *adev) ...@@ -6073,7 +6073,6 @@ static int gfx_v10_0_cp_gfx_resume(struct amdgpu_device *adev)
u32 tmp; u32 tmp;
u32 rb_bufsz; u32 rb_bufsz;
u64 rb_addr, rptr_addr, wptr_gpu_addr; u64 rb_addr, rptr_addr, wptr_gpu_addr;
u32 i;
/* Set the write pointer delay */ /* Set the write pointer delay */
WREG32_SOC15(GC, 0, mmCP_RB_WPTR_DELAY, 0); WREG32_SOC15(GC, 0, mmCP_RB_WPTR_DELAY, 0);
...@@ -6168,11 +6167,6 @@ static int gfx_v10_0_cp_gfx_resume(struct amdgpu_device *adev) ...@@ -6168,11 +6167,6 @@ static int gfx_v10_0_cp_gfx_resume(struct amdgpu_device *adev)
/* start the ring */ /* start the ring */
gfx_v10_0_cp_gfx_start(adev); gfx_v10_0_cp_gfx_start(adev);
for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
ring = &adev->gfx.gfx_ring[i];
ring->sched.ready = true;
}
return 0; return 0;
} }
...@@ -6470,7 +6464,7 @@ static int gfx_v10_0_cp_async_gfx_ring_resume(struct amdgpu_device *adev) ...@@ -6470,7 +6464,7 @@ static int gfx_v10_0_cp_async_gfx_ring_resume(struct amdgpu_device *adev)
r = amdgpu_bo_reserve(ring->mqd_obj, false); r = amdgpu_bo_reserve(ring->mqd_obj, false);
if (unlikely(r != 0)) if (unlikely(r != 0))
goto done; return r;
r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr); r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
if (!r) { if (!r) {
...@@ -6480,23 +6474,14 @@ static int gfx_v10_0_cp_async_gfx_ring_resume(struct amdgpu_device *adev) ...@@ -6480,23 +6474,14 @@ static int gfx_v10_0_cp_async_gfx_ring_resume(struct amdgpu_device *adev)
} }
amdgpu_bo_unreserve(ring->mqd_obj); amdgpu_bo_unreserve(ring->mqd_obj);
if (r) if (r)
goto done; return r;
} }
r = amdgpu_gfx_enable_kgq(adev, 0); r = amdgpu_gfx_enable_kgq(adev, 0);
if (r) if (r)
goto done; return r;
r = gfx_v10_0_cp_gfx_start(adev);
if (r)
goto done;
for (i = 0; i < adev->gfx.num_gfx_rings; i++) { return gfx_v10_0_cp_gfx_start(adev);
ring = &adev->gfx.gfx_ring[i];
ring->sched.ready = true;
}
done:
return r;
} }
static int gfx_v10_0_compute_mqd_init(struct amdgpu_device *adev, void *m, static int gfx_v10_0_compute_mqd_init(struct amdgpu_device *adev, void *m,
...@@ -6812,7 +6797,6 @@ static int gfx_v10_0_kiq_resume(struct amdgpu_device *adev) ...@@ -6812,7 +6797,6 @@ static int gfx_v10_0_kiq_resume(struct amdgpu_device *adev)
amdgpu_bo_kunmap(ring->mqd_obj); amdgpu_bo_kunmap(ring->mqd_obj);
ring->mqd_ptr = NULL; ring->mqd_ptr = NULL;
amdgpu_bo_unreserve(ring->mqd_obj); amdgpu_bo_unreserve(ring->mqd_obj);
ring->sched.ready = true;
return 0; return 0;
} }
......
...@@ -3228,7 +3228,6 @@ static int gfx_v11_0_cp_gfx_resume(struct amdgpu_device *adev) ...@@ -3228,7 +3228,6 @@ static int gfx_v11_0_cp_gfx_resume(struct amdgpu_device *adev)
u32 tmp; u32 tmp;
u32 rb_bufsz; u32 rb_bufsz;
u64 rb_addr, rptr_addr, wptr_gpu_addr; u64 rb_addr, rptr_addr, wptr_gpu_addr;
u32 i;
/* Set the write pointer delay */ /* Set the write pointer delay */
WREG32_SOC15(GC, 0, regCP_RB_WPTR_DELAY, 0); WREG32_SOC15(GC, 0, regCP_RB_WPTR_DELAY, 0);
...@@ -3320,11 +3319,6 @@ static int gfx_v11_0_cp_gfx_resume(struct amdgpu_device *adev) ...@@ -3320,11 +3319,6 @@ static int gfx_v11_0_cp_gfx_resume(struct amdgpu_device *adev)
/* start the ring */ /* start the ring */
gfx_v11_0_cp_gfx_start(adev); gfx_v11_0_cp_gfx_start(adev);
for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
ring = &adev->gfx.gfx_ring[i];
ring->sched.ready = true;
}
return 0; return 0;
} }
...@@ -3370,8 +3364,6 @@ static void gfx_v11_0_cp_compute_enable(struct amdgpu_device *adev, bool enable) ...@@ -3370,8 +3364,6 @@ static void gfx_v11_0_cp_compute_enable(struct amdgpu_device *adev, bool enable)
WREG32_SOC15(GC, 0, regCP_MEC_CNTL, data); WREG32_SOC15(GC, 0, regCP_MEC_CNTL, data);
} }
adev->gfx.kiq[0].ring.sched.ready = enable;
udelay(50); udelay(50);
} }
...@@ -3711,7 +3703,7 @@ static int gfx_v11_0_cp_async_gfx_ring_resume(struct amdgpu_device *adev) ...@@ -3711,7 +3703,7 @@ static int gfx_v11_0_cp_async_gfx_ring_resume(struct amdgpu_device *adev)
r = amdgpu_bo_reserve(ring->mqd_obj, false); r = amdgpu_bo_reserve(ring->mqd_obj, false);
if (unlikely(r != 0)) if (unlikely(r != 0))
goto done; return r;
r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr); r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
if (!r) { if (!r) {
...@@ -3721,23 +3713,14 @@ static int gfx_v11_0_cp_async_gfx_ring_resume(struct amdgpu_device *adev) ...@@ -3721,23 +3713,14 @@ static int gfx_v11_0_cp_async_gfx_ring_resume(struct amdgpu_device *adev)
} }
amdgpu_bo_unreserve(ring->mqd_obj); amdgpu_bo_unreserve(ring->mqd_obj);
if (r) if (r)
goto done; return r;
} }
r = amdgpu_gfx_enable_kgq(adev, 0); r = amdgpu_gfx_enable_kgq(adev, 0);
if (r) if (r)
goto done; return r;
r = gfx_v11_0_cp_gfx_start(adev);
if (r)
goto done;
for (i = 0; i < adev->gfx.num_gfx_rings; i++) { return gfx_v11_0_cp_gfx_start(adev);
ring = &adev->gfx.gfx_ring[i];
ring->sched.ready = true;
}
done:
return r;
} }
static int gfx_v11_0_compute_mqd_init(struct amdgpu_device *adev, void *m, static int gfx_v11_0_compute_mqd_init(struct amdgpu_device *adev, void *m,
......
...@@ -4283,7 +4283,6 @@ static int gfx_v8_0_cp_gfx_resume(struct amdgpu_device *adev) ...@@ -4283,7 +4283,6 @@ static int gfx_v8_0_cp_gfx_resume(struct amdgpu_device *adev)
/* start the ring */ /* start the ring */
amdgpu_ring_clear_ring(ring); amdgpu_ring_clear_ring(ring);
gfx_v8_0_cp_gfx_start(adev); gfx_v8_0_cp_gfx_start(adev);
ring->sched.ready = true;
return 0; return 0;
} }
...@@ -4693,7 +4692,6 @@ static int gfx_v8_0_kiq_resume(struct amdgpu_device *adev) ...@@ -4693,7 +4692,6 @@ static int gfx_v8_0_kiq_resume(struct amdgpu_device *adev)
amdgpu_bo_kunmap(ring->mqd_obj); amdgpu_bo_kunmap(ring->mqd_obj);
ring->mqd_ptr = NULL; ring->mqd_ptr = NULL;
amdgpu_bo_unreserve(ring->mqd_obj); amdgpu_bo_unreserve(ring->mqd_obj);
ring->sched.ready = true;
return 0; return 0;
} }
......
...@@ -3144,7 +3144,6 @@ static int gfx_v9_0_cp_gfx_resume(struct amdgpu_device *adev) ...@@ -3144,7 +3144,6 @@ static int gfx_v9_0_cp_gfx_resume(struct amdgpu_device *adev)
/* start the ring */ /* start the ring */
gfx_v9_0_cp_gfx_start(adev); gfx_v9_0_cp_gfx_start(adev);
ring->sched.ready = true;
return 0; return 0;
} }
...@@ -3623,7 +3622,6 @@ static int gfx_v9_0_kiq_resume(struct amdgpu_device *adev) ...@@ -3623,7 +3622,6 @@ static int gfx_v9_0_kiq_resume(struct amdgpu_device *adev)
amdgpu_bo_kunmap(ring->mqd_obj); amdgpu_bo_kunmap(ring->mqd_obj);
ring->mqd_ptr = NULL; ring->mqd_ptr = NULL;
amdgpu_bo_unreserve(ring->mqd_obj); amdgpu_bo_unreserve(ring->mqd_obj);
ring->sched.ready = true;
return 0; return 0;
} }
......
...@@ -1845,7 +1845,6 @@ static int gfx_v9_4_3_xcc_kiq_resume(struct amdgpu_device *adev, int xcc_id) ...@@ -1845,7 +1845,6 @@ static int gfx_v9_4_3_xcc_kiq_resume(struct amdgpu_device *adev, int xcc_id)
amdgpu_bo_kunmap(ring->mqd_obj); amdgpu_bo_kunmap(ring->mqd_obj);
ring->mqd_ptr = NULL; ring->mqd_ptr = NULL;
amdgpu_bo_unreserve(ring->mqd_obj); amdgpu_bo_unreserve(ring->mqd_obj);
ring->sched.ready = true;
return 0; return 0;
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment