Commit 003cefe0 authored by Alex Deucher, committed by Dave Airlie

drm/radeon/kms: Make GPU/CPU page size handling consistent in blit code (v2)

The BO blit code inconsistently handled the page size.  This wasn't
an issue on systems with 4k pages, since the GPU's page size is 4k as
well.  Switch the driver blit callbacks to take the number of pages in
GPU page units.

Fixes Lemote mipsel systems using AMD rs780/rs880 chipsets.

v2: incorporate suggestions from Michel.
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Reviewed-by: Michel Dänzer <michel.daenzer@amd.com>
Cc: stable@kernel.org
Signed-off-by: Dave Airlie <airlied@redhat.com>
parent 18b4fada
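Before the diff: the whole fix comes down to a unit conversion at the TTM boundary. TTM counts buffer pages in CPU page units (PAGE_SIZE, 4k on x86 but larger on some MIPS configurations), while radeon's blit engines always operate on fixed 4k GPU pages. The standalone sketch below only illustrates that conversion; it is not code from the patch, and the helper name cpu_pages_to_gpu_pages and the 16k sample CPU page size are assumptions for the example.

#include <assert.h>
#include <stdio.h>

/* Illustrative constants: the GPU page size is fixed at 4k (see the
 * radeon.h hunk below); the CPU page size varies by architecture. */
#define RADEON_GPU_PAGE_SIZE 4096u
#define CPU_PAGE_SIZE        16384u  /* e.g. a 16k-page MIPS config */

/* Hypothetical helper mirroring the conversion radeon_move_blit() now
 * performs: CPU pages -> GPU pages. Only valid when the CPU page size
 * is a multiple of the GPU page size, which the patch checks with
 * BUILD_BUG_ON() in the kernel. */
static unsigned cpu_pages_to_gpu_pages(unsigned num_cpu_pages)
{
	assert(CPU_PAGE_SIZE % RADEON_GPU_PAGE_SIZE == 0);
	return num_cpu_pages * (CPU_PAGE_SIZE / RADEON_GPU_PAGE_SIZE);
}

int main(void)
{
	/* A 10-CPU-page BO on a 16k-page system spans 40 GPU pages;
	 * passing 10 straight to the blit code would copy only a
	 * quarter of the buffer, which is the bug being fixed. */
	printf("%u CPU pages -> %u GPU pages\n",
	       10u, cpu_pages_to_gpu_pages(10u));
	return 0;
}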
drivers/gpu/drm/radeon/evergreen.c
@@ -3171,21 +3171,23 @@ int evergreen_suspend(struct radeon_device *rdev)
 }
 
 int evergreen_copy_blit(struct radeon_device *rdev,
-			uint64_t src_offset, uint64_t dst_offset,
-			unsigned num_pages, struct radeon_fence *fence)
+			uint64_t src_offset,
+			uint64_t dst_offset,
+			unsigned num_gpu_pages,
+			struct radeon_fence *fence)
 {
 	int r;
 
 	mutex_lock(&rdev->r600_blit.mutex);
 	rdev->r600_blit.vb_ib = NULL;
-	r = evergreen_blit_prepare_copy(rdev, num_pages * RADEON_GPU_PAGE_SIZE);
+	r = evergreen_blit_prepare_copy(rdev, num_gpu_pages * RADEON_GPU_PAGE_SIZE);
 	if (r) {
 		if (rdev->r600_blit.vb_ib)
 			radeon_ib_free(rdev, &rdev->r600_blit.vb_ib);
 		mutex_unlock(&rdev->r600_blit.mutex);
 		return r;
 	}
-	evergreen_kms_blit_copy(rdev, src_offset, dst_offset, num_pages * RADEON_GPU_PAGE_SIZE);
+	evergreen_kms_blit_copy(rdev, src_offset, dst_offset, num_gpu_pages * RADEON_GPU_PAGE_SIZE);
 	evergreen_blit_done_copy(rdev, fence);
 	mutex_unlock(&rdev->r600_blit.mutex);
 	return 0;
drivers/gpu/drm/radeon/r100.c
@@ -721,11 +721,11 @@ void r100_fence_ring_emit(struct radeon_device *rdev,
 int r100_copy_blit(struct radeon_device *rdev,
 		   uint64_t src_offset,
 		   uint64_t dst_offset,
-		   unsigned num_pages,
+		   unsigned num_gpu_pages,
 		   struct radeon_fence *fence)
 {
 	uint32_t cur_pages;
-	uint32_t stride_bytes = PAGE_SIZE;
+	uint32_t stride_bytes = RADEON_GPU_PAGE_SIZE;
 	uint32_t pitch;
 	uint32_t stride_pixels;
 	unsigned ndw;
@@ -737,7 +737,7 @@ int r100_copy_blit(struct radeon_device *rdev,
 	/* radeon pitch is /64 */
 	pitch = stride_bytes / 64;
 	stride_pixels = stride_bytes / 4;
-	num_loops = DIV_ROUND_UP(num_pages, 8191);
+	num_loops = DIV_ROUND_UP(num_gpu_pages, 8191);
 
 	/* Ask for enough room for blit + flush + fence */
 	ndw = 64 + (10 * num_loops);
@@ -746,12 +746,12 @@ int r100_copy_blit(struct radeon_device *rdev,
 		DRM_ERROR("radeon: moving bo (%d) asking for %u dw.\n", r, ndw);
 		return -EINVAL;
 	}
-	while (num_pages > 0) {
-		cur_pages = num_pages;
+	while (num_gpu_pages > 0) {
+		cur_pages = num_gpu_pages;
 		if (cur_pages > 8191) {
 			cur_pages = 8191;
 		}
-		num_pages -= cur_pages;
+		num_gpu_pages -= cur_pages;
 		/* pages are in Y direction - height
 		   page width in X direction - width */
drivers/gpu/drm/radeon/r200.c
@@ -84,7 +84,7 @@ static int r200_get_vtx_size_0(uint32_t vtx_fmt_0)
 int r200_copy_dma(struct radeon_device *rdev,
 		  uint64_t src_offset,
 		  uint64_t dst_offset,
-		  unsigned num_pages,
+		  unsigned num_gpu_pages,
 		  struct radeon_fence *fence)
 {
 	uint32_t size;
@@ -93,7 +93,7 @@ int r200_copy_dma(struct radeon_device *rdev,
 	int r = 0;
 
 	/* radeon pitch is /64 */
-	size = num_pages << PAGE_SHIFT;
+	size = num_gpu_pages << RADEON_GPU_PAGE_SHIFT;
 	num_loops = DIV_ROUND_UP(size, 0x1FFFFF);
 	r = radeon_ring_lock(rdev, num_loops * 4 + 64);
 	if (r) {
drivers/gpu/drm/radeon/r600.c
@@ -2353,21 +2353,23 @@ void r600_fence_ring_emit(struct radeon_device *rdev,
 }
 
 int r600_copy_blit(struct radeon_device *rdev,
-		   uint64_t src_offset, uint64_t dst_offset,
-		   unsigned num_pages, struct radeon_fence *fence)
+		   uint64_t src_offset,
+		   uint64_t dst_offset,
+		   unsigned num_gpu_pages,
+		   struct radeon_fence *fence)
 {
 	int r;
 
 	mutex_lock(&rdev->r600_blit.mutex);
 	rdev->r600_blit.vb_ib = NULL;
-	r = r600_blit_prepare_copy(rdev, num_pages * RADEON_GPU_PAGE_SIZE);
+	r = r600_blit_prepare_copy(rdev, num_gpu_pages * RADEON_GPU_PAGE_SIZE);
 	if (r) {
 		if (rdev->r600_blit.vb_ib)
 			radeon_ib_free(rdev, &rdev->r600_blit.vb_ib);
 		mutex_unlock(&rdev->r600_blit.mutex);
 		return r;
 	}
-	r600_kms_blit_copy(rdev, src_offset, dst_offset, num_pages * RADEON_GPU_PAGE_SIZE);
+	r600_kms_blit_copy(rdev, src_offset, dst_offset, num_gpu_pages * RADEON_GPU_PAGE_SIZE);
 	r600_blit_done_copy(rdev, fence);
 	mutex_unlock(&rdev->r600_blit.mutex);
 	return 0;
drivers/gpu/drm/radeon/radeon.h
@@ -322,6 +322,7 @@ union radeon_gart_table {
 
 #define RADEON_GPU_PAGE_SIZE 4096
 #define RADEON_GPU_PAGE_MASK (RADEON_GPU_PAGE_SIZE - 1)
+#define RADEON_GPU_PAGE_SHIFT 12
 
 struct radeon_gart {
 	dma_addr_t table_addr;
@@ -914,17 +915,17 @@ struct radeon_asic {
 	int (*copy_blit)(struct radeon_device *rdev,
 			 uint64_t src_offset,
 			 uint64_t dst_offset,
-			 unsigned num_pages,
+			 unsigned num_gpu_pages,
 			 struct radeon_fence *fence);
 	int (*copy_dma)(struct radeon_device *rdev,
 			uint64_t src_offset,
 			uint64_t dst_offset,
-			unsigned num_pages,
+			unsigned num_gpu_pages,
 			struct radeon_fence *fence);
 	int (*copy)(struct radeon_device *rdev,
 		    uint64_t src_offset,
 		    uint64_t dst_offset,
-		    unsigned num_pages,
+		    unsigned num_gpu_pages,
 		    struct radeon_fence *fence);
 	uint32_t (*get_engine_clock)(struct radeon_device *rdev);
 	void (*set_engine_clock)(struct radeon_device *rdev, uint32_t eng_clock);
drivers/gpu/drm/radeon/radeon_asic.h
@@ -75,7 +75,7 @@ uint32_t r100_pll_rreg(struct radeon_device *rdev, uint32_t reg);
 int r100_copy_blit(struct radeon_device *rdev,
 		   uint64_t src_offset,
 		   uint64_t dst_offset,
-		   unsigned num_pages,
+		   unsigned num_gpu_pages,
 		   struct radeon_fence *fence);
 int r100_set_surface_reg(struct radeon_device *rdev, int reg,
 			 uint32_t tiling_flags, uint32_t pitch,
@@ -143,7 +143,7 @@ extern void r100_post_page_flip(struct radeon_device *rdev, int crtc);
 extern int r200_copy_dma(struct radeon_device *rdev,
 			 uint64_t src_offset,
 			 uint64_t dst_offset,
-			 unsigned num_pages,
+			 unsigned num_gpu_pages,
 			 struct radeon_fence *fence);
 void r200_set_safe_registers(struct radeon_device *rdev);
@@ -311,7 +311,7 @@ void r600_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
 int r600_ring_test(struct radeon_device *rdev);
 int r600_copy_blit(struct radeon_device *rdev,
 		   uint64_t src_offset, uint64_t dst_offset,
-		   unsigned num_pages, struct radeon_fence *fence);
+		   unsigned num_gpu_pages, struct radeon_fence *fence);
 void r600_hpd_init(struct radeon_device *rdev);
 void r600_hpd_fini(struct radeon_device *rdev);
 bool r600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd);
@@ -403,7 +403,7 @@ void evergreen_bandwidth_update(struct radeon_device *rdev);
 void evergreen_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
 int evergreen_copy_blit(struct radeon_device *rdev,
 			uint64_t src_offset, uint64_t dst_offset,
-			unsigned num_pages, struct radeon_fence *fence);
+			unsigned num_gpu_pages, struct radeon_fence *fence);
 void evergreen_hpd_init(struct radeon_device *rdev);
 void evergreen_hpd_fini(struct radeon_device *rdev);
 bool evergreen_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd);
drivers/gpu/drm/radeon/radeon_ttm.c
@@ -277,7 +277,12 @@ static int radeon_move_blit(struct ttm_buffer_object *bo,
 		DRM_ERROR("Trying to move memory with CP turned off.\n");
 		return -EINVAL;
 	}
-	r = radeon_copy(rdev, old_start, new_start, new_mem->num_pages, fence);
+
+	BUILD_BUG_ON((PAGE_SIZE % RADEON_GPU_PAGE_SIZE) != 0);
+
+	r = radeon_copy(rdev, old_start, new_start,
+			new_mem->num_pages * (PAGE_SIZE / RADEON_GPU_PAGE_SIZE), /* GPU pages */
+			fence);
 	/* FIXME: handle copy error */
 	r = ttm_bo_move_accel_cleanup(bo, (void *)fence, NULL,
 			evict, no_wait_reserve, no_wait_gpu, new_mem);