Commit 089920f2 authored by Jerome Glisse's avatar Jerome Glisse Committed by Alex Deucher

drm/radeon: fix write back suspend regression with uvd v2

The UVD ring can't use a scratch register, thus it needs the writeback
buffer to keep a valid address or radeon_ring_backup will trigger a kernel fault.

It's OK not to unpin the writeback buffer on suspend, as it stays in the
GTT and thus does not need eviction.

v2: Fix the uvd case.

Reported and tracked by Wojtek <wojtask9@wp.pl>
Signed-off-by: Jerome Glisse <jglisse@redhat.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent 3813f5ca
...@@ -244,16 +244,6 @@ void radeon_scratch_free(struct radeon_device *rdev, uint32_t reg) ...@@ -244,16 +244,6 @@ void radeon_scratch_free(struct radeon_device *rdev, uint32_t reg)
*/ */
void radeon_wb_disable(struct radeon_device *rdev) void radeon_wb_disable(struct radeon_device *rdev)
{ {
int r;
if (rdev->wb.wb_obj) {
r = radeon_bo_reserve(rdev->wb.wb_obj, false);
if (unlikely(r != 0))
return;
radeon_bo_kunmap(rdev->wb.wb_obj);
radeon_bo_unpin(rdev->wb.wb_obj);
radeon_bo_unreserve(rdev->wb.wb_obj);
}
rdev->wb.enabled = false; rdev->wb.enabled = false;
} }
...@@ -269,6 +259,11 @@ void radeon_wb_fini(struct radeon_device *rdev) ...@@ -269,6 +259,11 @@ void radeon_wb_fini(struct radeon_device *rdev)
{ {
radeon_wb_disable(rdev); radeon_wb_disable(rdev);
if (rdev->wb.wb_obj) { if (rdev->wb.wb_obj) {
if (!radeon_bo_reserve(rdev->wb.wb_obj, false)) {
radeon_bo_kunmap(rdev->wb.wb_obj);
radeon_bo_unpin(rdev->wb.wb_obj);
radeon_bo_unreserve(rdev->wb.wb_obj);
}
radeon_bo_unref(&rdev->wb.wb_obj); radeon_bo_unref(&rdev->wb.wb_obj);
rdev->wb.wb = NULL; rdev->wb.wb = NULL;
rdev->wb.wb_obj = NULL; rdev->wb.wb_obj = NULL;
...@@ -295,7 +290,6 @@ int radeon_wb_init(struct radeon_device *rdev) ...@@ -295,7 +290,6 @@ int radeon_wb_init(struct radeon_device *rdev)
dev_warn(rdev->dev, "(%d) create WB bo failed\n", r); dev_warn(rdev->dev, "(%d) create WB bo failed\n", r);
return r; return r;
} }
}
r = radeon_bo_reserve(rdev->wb.wb_obj, false); r = radeon_bo_reserve(rdev->wb.wb_obj, false);
if (unlikely(r != 0)) { if (unlikely(r != 0)) {
radeon_wb_fini(rdev); radeon_wb_fini(rdev);
...@@ -316,6 +310,7 @@ int radeon_wb_init(struct radeon_device *rdev) ...@@ -316,6 +310,7 @@ int radeon_wb_init(struct radeon_device *rdev)
radeon_wb_fini(rdev); radeon_wb_fini(rdev);
return r; return r;
} }
}
/* clear wb memory */ /* clear wb memory */
memset((char *)rdev->wb.wb, 0, RADEON_GPU_PAGE_SIZE); memset((char *)rdev->wb.wb, 0, RADEON_GPU_PAGE_SIZE);
......
...@@ -63,7 +63,9 @@ static void radeon_fence_write(struct radeon_device *rdev, u32 seq, int ring) ...@@ -63,7 +63,9 @@ static void radeon_fence_write(struct radeon_device *rdev, u32 seq, int ring)
{ {
struct radeon_fence_driver *drv = &rdev->fence_drv[ring]; struct radeon_fence_driver *drv = &rdev->fence_drv[ring];
if (likely(rdev->wb.enabled || !drv->scratch_reg)) { if (likely(rdev->wb.enabled || !drv->scratch_reg)) {
if (drv->cpu_addr) {
*drv->cpu_addr = cpu_to_le32(seq); *drv->cpu_addr = cpu_to_le32(seq);
}
} else { } else {
WREG32(drv->scratch_reg, seq); WREG32(drv->scratch_reg, seq);
} }
...@@ -84,7 +86,11 @@ static u32 radeon_fence_read(struct radeon_device *rdev, int ring) ...@@ -84,7 +86,11 @@ static u32 radeon_fence_read(struct radeon_device *rdev, int ring)
u32 seq = 0; u32 seq = 0;
if (likely(rdev->wb.enabled || !drv->scratch_reg)) { if (likely(rdev->wb.enabled || !drv->scratch_reg)) {
if (drv->cpu_addr) {
seq = le32_to_cpu(*drv->cpu_addr); seq = le32_to_cpu(*drv->cpu_addr);
} else {
seq = lower_32_bits(atomic64_read(&drv->last_seq));
}
} else { } else {
seq = RREG32(drv->scratch_reg); seq = RREG32(drv->scratch_reg);
} }
......
...@@ -159,7 +159,17 @@ int radeon_uvd_suspend(struct radeon_device *rdev) ...@@ -159,7 +159,17 @@ int radeon_uvd_suspend(struct radeon_device *rdev)
if (!r) { if (!r) {
radeon_bo_kunmap(rdev->uvd.vcpu_bo); radeon_bo_kunmap(rdev->uvd.vcpu_bo);
radeon_bo_unpin(rdev->uvd.vcpu_bo); radeon_bo_unpin(rdev->uvd.vcpu_bo);
rdev->uvd.cpu_addr = NULL;
if (!radeon_bo_pin(rdev->uvd.vcpu_bo, RADEON_GEM_DOMAIN_CPU, NULL)) {
radeon_bo_kmap(rdev->uvd.vcpu_bo, &rdev->uvd.cpu_addr);
}
radeon_bo_unreserve(rdev->uvd.vcpu_bo); radeon_bo_unreserve(rdev->uvd.vcpu_bo);
if (rdev->uvd.cpu_addr) {
radeon_fence_driver_start_ring(rdev, R600_RING_TYPE_UVD_INDEX);
} else {
rdev->fence_drv[R600_RING_TYPE_UVD_INDEX].cpu_addr = NULL;
}
} }
return r; return r;
} }
...@@ -178,6 +188,10 @@ int radeon_uvd_resume(struct radeon_device *rdev) ...@@ -178,6 +188,10 @@ int radeon_uvd_resume(struct radeon_device *rdev)
return r; return r;
} }
/* Was pinned in the CPU domain on suspend: unmap and unpin it first */
radeon_bo_kunmap(rdev->uvd.vcpu_bo);
radeon_bo_unpin(rdev->uvd.vcpu_bo);
r = radeon_bo_pin(rdev->uvd.vcpu_bo, RADEON_GEM_DOMAIN_VRAM, r = radeon_bo_pin(rdev->uvd.vcpu_bo, RADEON_GEM_DOMAIN_VRAM,
&rdev->uvd.gpu_addr); &rdev->uvd.gpu_addr);
if (r) { if (r) {
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment