Commit e460f244 authored by Alex Deucher

drm/amdgpu: plumb error handling through amdgpu_benchmark()

So we can tell when this function fails.

v2: squash in error handling fix (Alex)
Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent 091cd9c3
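
With this change amdgpu_benchmark() returns 0 on success or a negative errno instead of void, so callers can propagate a failure rather than silently ignore it. A minimal sketch of what such a caller might look like (the wrapper function below is hypothetical and not part of this patch; it assumes the usual amdgpu headers are included):

/* Hypothetical caller: run the requested benchmark and surface failures. */
static int example_run_benchmark(struct amdgpu_device *adev, int test_number)
{
	int r;

	r = amdgpu_benchmark(adev, test_number);
	if (r)
		dev_err(adev->dev, "benchmark %d failed (%d)\n", test_number, r);

	return r;
}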
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -586,7 +586,7 @@ void amdgpu_device_wb_free(struct amdgpu_device *adev, u32 wb);
 /*
  * Benchmarking
  */
-void amdgpu_benchmark(struct amdgpu_device *adev, int test_number);
+int amdgpu_benchmark(struct amdgpu_device *adev, int test_number);
 
 /*
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c
@@ -70,15 +70,14 @@ static void amdgpu_benchmark_log_results(struct amdgpu_device *adev,
                  throughput * 8, throughput);
 }
 
-static void amdgpu_benchmark_move(struct amdgpu_device *adev, unsigned size,
-                                  unsigned sdomain, unsigned ddomain)
+static int amdgpu_benchmark_move(struct amdgpu_device *adev, unsigned size,
+                                 unsigned sdomain, unsigned ddomain)
 {
         struct amdgpu_bo *dobj = NULL;
         struct amdgpu_bo *sobj = NULL;
         struct amdgpu_bo_param bp;
         uint64_t saddr, daddr;
         int r, n;
-        int time;
 
         memset(&bp, 0, sizeof(bp));
         bp.size = size;
@@ -129,19 +128,18 @@ static void amdgpu_benchmark_move(struct amdgpu_device *adev, unsigned size,
         daddr = amdgpu_bo_gpu_offset(dobj);
 
         if (adev->mman.buffer_funcs) {
-                time = amdgpu_benchmark_do_move(adev, size, saddr, daddr, n);
-                if (time < 0)
+                r = amdgpu_benchmark_do_move(adev, size, saddr, daddr, n);
+                if (r < 0)
                         goto out_cleanup;
-                if (time > 0)
-                        amdgpu_benchmark_log_results(adev, n, size, time,
+                if (r > 0)
+                        amdgpu_benchmark_log_results(adev, n, size, r,
                                                      sdomain, ddomain, "dma");
         }
 
 out_cleanup:
         /* Check error value now. The value can be overwritten when clean up.*/
-        if (r) {
+        if (r < 0)
                 dev_info(adev->dev, "Error while benchmarking BO move.\n");
-        }
 
         if (sobj) {
                 r = amdgpu_bo_reserve(sobj, true);
@@ -159,11 +157,12 @@ static void amdgpu_benchmark_move(struct amdgpu_device *adev, unsigned size,
                 }
                 amdgpu_bo_unref(&dobj);
         }
+        return r;
 }
 
-void amdgpu_benchmark(struct amdgpu_device *adev, int test_number)
+int amdgpu_benchmark(struct amdgpu_device *adev, int test_number)
 {
-        int i;
+        int i, r;
         static const int common_modes[AMDGPU_BENCHMARK_COMMON_MODES_N] = {
                 640 * 480 * 4,
                 720 * 480 * 4,
@@ -187,60 +186,87 @@ void amdgpu_benchmark(struct amdgpu_device *adev, int test_number)
         switch (test_number) {
         case 1:
                 /* simple test, VRAM to GTT and GTT to VRAM */
-                amdgpu_benchmark_move(adev, 1024*1024, AMDGPU_GEM_DOMAIN_GTT,
-                                      AMDGPU_GEM_DOMAIN_VRAM);
-                amdgpu_benchmark_move(adev, 1024*1024, AMDGPU_GEM_DOMAIN_VRAM,
-                                      AMDGPU_GEM_DOMAIN_GTT);
+                r = amdgpu_benchmark_move(adev, 1024*1024, AMDGPU_GEM_DOMAIN_GTT,
+                                          AMDGPU_GEM_DOMAIN_VRAM);
+                if (r)
+                        return r;
+                r = amdgpu_benchmark_move(adev, 1024*1024, AMDGPU_GEM_DOMAIN_VRAM,
+                                          AMDGPU_GEM_DOMAIN_GTT);
+                if (r)
+                        return r;
                 break;
         case 2:
                 /* simple test, VRAM to VRAM */
-                amdgpu_benchmark_move(adev, 1024*1024, AMDGPU_GEM_DOMAIN_VRAM,
-                                      AMDGPU_GEM_DOMAIN_VRAM);
+                r = amdgpu_benchmark_move(adev, 1024*1024, AMDGPU_GEM_DOMAIN_VRAM,
+                                          AMDGPU_GEM_DOMAIN_VRAM);
+                if (r)
+                        return r;
                 break;
         case 3:
                 /* GTT to VRAM, buffer size sweep, powers of 2 */
-                for (i = 1; i <= 16384; i <<= 1)
-                        amdgpu_benchmark_move(adev, i * AMDGPU_GPU_PAGE_SIZE,
-                                              AMDGPU_GEM_DOMAIN_GTT,
-                                              AMDGPU_GEM_DOMAIN_VRAM);
+                for (i = 1; i <= 16384; i <<= 1) {
+                        r = amdgpu_benchmark_move(adev, i * AMDGPU_GPU_PAGE_SIZE,
+                                                  AMDGPU_GEM_DOMAIN_GTT,
+                                                  AMDGPU_GEM_DOMAIN_VRAM);
+                        if (r)
+                                return r;
+                }
                 break;
         case 4:
                 /* VRAM to GTT, buffer size sweep, powers of 2 */
-                for (i = 1; i <= 16384; i <<= 1)
-                        amdgpu_benchmark_move(adev, i * AMDGPU_GPU_PAGE_SIZE,
-                                              AMDGPU_GEM_DOMAIN_VRAM,
-                                              AMDGPU_GEM_DOMAIN_GTT);
+                for (i = 1; i <= 16384; i <<= 1) {
+                        r = amdgpu_benchmark_move(adev, i * AMDGPU_GPU_PAGE_SIZE,
+                                                  AMDGPU_GEM_DOMAIN_VRAM,
+                                                  AMDGPU_GEM_DOMAIN_GTT);
+                        if (r)
+                                return r;
+                }
                 break;
         case 5:
                 /* VRAM to VRAM, buffer size sweep, powers of 2 */
-                for (i = 1; i <= 16384; i <<= 1)
-                        amdgpu_benchmark_move(adev, i * AMDGPU_GPU_PAGE_SIZE,
-                                              AMDGPU_GEM_DOMAIN_VRAM,
-                                              AMDGPU_GEM_DOMAIN_VRAM);
+                for (i = 1; i <= 16384; i <<= 1) {
+                        r = amdgpu_benchmark_move(adev, i * AMDGPU_GPU_PAGE_SIZE,
+                                                  AMDGPU_GEM_DOMAIN_VRAM,
+                                                  AMDGPU_GEM_DOMAIN_VRAM);
+                        if (r)
+                                return r;
+                }
                 break;
         case 6:
                 /* GTT to VRAM, buffer size sweep, common modes */
-                for (i = 0; i < AMDGPU_BENCHMARK_COMMON_MODES_N; i++)
-                        amdgpu_benchmark_move(adev, common_modes[i],
-                                              AMDGPU_GEM_DOMAIN_GTT,
-                                              AMDGPU_GEM_DOMAIN_VRAM);
+                for (i = 0; i < AMDGPU_BENCHMARK_COMMON_MODES_N; i++) {
+                        r = amdgpu_benchmark_move(adev, common_modes[i],
+                                                  AMDGPU_GEM_DOMAIN_GTT,
+                                                  AMDGPU_GEM_DOMAIN_VRAM);
+                        if (r)
+                                return r;
+                }
                 break;
         case 7:
                 /* VRAM to GTT, buffer size sweep, common modes */
-                for (i = 0; i < AMDGPU_BENCHMARK_COMMON_MODES_N; i++)
-                        amdgpu_benchmark_move(adev, common_modes[i],
-                                              AMDGPU_GEM_DOMAIN_VRAM,
-                                              AMDGPU_GEM_DOMAIN_GTT);
+                for (i = 0; i < AMDGPU_BENCHMARK_COMMON_MODES_N; i++) {
+                        r = amdgpu_benchmark_move(adev, common_modes[i],
+                                                  AMDGPU_GEM_DOMAIN_VRAM,
+                                                  AMDGPU_GEM_DOMAIN_GTT);
+                        if (r)
+                                return r;
+                }
                 break;
         case 8:
                 /* VRAM to VRAM, buffer size sweep, common modes */
-                for (i = 0; i < AMDGPU_BENCHMARK_COMMON_MODES_N; i++)
-                        amdgpu_benchmark_move(adev, common_modes[i],
-                                              AMDGPU_GEM_DOMAIN_VRAM,
-                                              AMDGPU_GEM_DOMAIN_VRAM);
+                for (i = 0; i < AMDGPU_BENCHMARK_COMMON_MODES_N; i++) {
+                        r = amdgpu_benchmark_move(adev, common_modes[i],
+                                                  AMDGPU_GEM_DOMAIN_VRAM,
+                                                  AMDGPU_GEM_DOMAIN_VRAM);
+                        if (r)
+                                return r;
+                }
                 break;
         default:
                 dev_info(adev->dev, "Unknown benchmark\n");
+                r = -EINVAL;
                 break;
         }
+
+        return r;
 }
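
The "Check error value now" comment in out_cleanup above captures the pattern this patch relies on: the primary error is reported before the cleanup calls reuse r, and whatever ends up in r is what amdgpu_benchmark_move() returns. A self-contained, purely illustrative sketch of that pattern (the function and its parameters are made up for illustration, not code from this patch):

#include <linux/device.h>
#include <linux/errno.h>

/* Illustrative only: mirror the out_cleanup handling above. */
static int example_move(struct device *dev, int copy_err, int cleanup_err)
{
	int r = copy_err;		/* stand-in for the copy/benchmark step */

	/* Check the error now; the cleanup below can overwrite it. */
	if (r < 0)
		dev_info(dev, "Error while benchmarking BO move.\n");

	if (cleanup_err)
		r = cleanup_err;	/* stand-in for a failing cleanup call */

	return r;
}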