Commit 3084cf46 authored by Christian König, committed by Alex Deucher

drm/ttm: return -EBUSY on pipelining with no_wait_gpu (v2)

Setting the no_wait_gpu flag means that the allocated BO must be available
immediately and we can't wait for any GPU operation to finish.

v2: squash in mem leak fix, rebase
Signed-off-by: Christian König <christian.koenig@amd.com>
Acked-by: Felix Kuehling <Felix.Kuehling@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent 6817bf28
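
Below is a hedged, driver-side sketch (not part of this commit) of how a caller
might exercise the new behaviour: with ttm_operation_ctx.no_wait_gpu set, a
placement whose memory manager still has a pipelined move in flight can now
surface as -EBUSY instead of stalling. The example_validate_nowait() name and
the surrounding setup are assumptions for illustration; ttm_bo_validate() and
struct ttm_operation_ctx are the existing TTM interfaces of this era.

	#include <drm/ttm/ttm_bo_api.h>
	#include <drm/ttm/ttm_placement.h>

	/* Caller must already hold the BO's reservation (dma_resv) lock. */
	static int example_validate_nowait(struct ttm_buffer_object *bo,
					   struct ttm_placement *placement)
	{
		struct ttm_operation_ctx ctx = {
			.interruptible = true,
			.no_wait_gpu = true,	/* fail instead of waiting for moves */
		};
		int ret;

		ret = ttm_bo_validate(bo, placement, &ctx);
		if (ret == -EBUSY) {
			/*
			 * A pipelined move is still in flight on the target
			 * memory manager(s); back off and retry later rather
			 * than blocking here.
			 */
		}
		return ret;
	}
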
...@@ -925,7 +925,8 @@ EXPORT_SYMBOL(ttm_bo_mem_put); ...@@ -925,7 +925,8 @@ EXPORT_SYMBOL(ttm_bo_mem_put);
*/ */
static int ttm_bo_add_move_fence(struct ttm_buffer_object *bo, static int ttm_bo_add_move_fence(struct ttm_buffer_object *bo,
struct ttm_mem_type_manager *man, struct ttm_mem_type_manager *man,
struct ttm_mem_reg *mem) struct ttm_mem_reg *mem,
bool no_wait_gpu)
{ {
struct dma_fence *fence; struct dma_fence *fence;
int ret; int ret;
...@@ -934,7 +935,12 @@ static int ttm_bo_add_move_fence(struct ttm_buffer_object *bo, ...@@ -934,7 +935,12 @@ static int ttm_bo_add_move_fence(struct ttm_buffer_object *bo,
fence = dma_fence_get(man->move); fence = dma_fence_get(man->move);
spin_unlock(&man->move_lock); spin_unlock(&man->move_lock);
if (fence) { if (!fence)
return 0;
if (no_wait_gpu)
return -EBUSY;
dma_resv_add_shared_fence(bo->base.resv, fence); dma_resv_add_shared_fence(bo->base.resv, fence);
ret = dma_resv_reserve_shared(bo->base.resv, 1); ret = dma_resv_reserve_shared(bo->base.resv, 1);
...@@ -945,8 +951,6 @@ static int ttm_bo_add_move_fence(struct ttm_buffer_object *bo, ...@@ -945,8 +951,6 @@ static int ttm_bo_add_move_fence(struct ttm_buffer_object *bo,
dma_fence_put(bo->moving); dma_fence_put(bo->moving);
bo->moving = fence; bo->moving = fence;
}
return 0; return 0;
} }
...@@ -977,7 +981,7 @@ static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo, ...@@ -977,7 +981,7 @@ static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
return ret; return ret;
} while (1); } while (1);
return ttm_bo_add_move_fence(bo, man, mem); return ttm_bo_add_move_fence(bo, man, mem, ctx->no_wait_gpu);
} }
static uint32_t ttm_bo_select_caching(struct ttm_mem_type_manager *man, static uint32_t ttm_bo_select_caching(struct ttm_mem_type_manager *man,
...@@ -1119,15 +1123,19 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo, ...@@ -1119,15 +1123,19 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
if (unlikely(ret)) if (unlikely(ret))
goto error; goto error;
if (mem->mm_node) { if (!mem->mm_node)
ret = ttm_bo_add_move_fence(bo, man, mem); continue;
ret = ttm_bo_add_move_fence(bo, man, mem, ctx->no_wait_gpu);
if (unlikely(ret)) { if (unlikely(ret)) {
(*man->func->put_node)(man, mem); (*man->func->put_node)(man, mem);
if (ret == -EBUSY)
continue;
goto error; goto error;
} }
return 0; return 0;
} }
}
for (i = 0; i < placement->num_busy_placement; ++i) { for (i = 0; i < placement->num_busy_placement; ++i) {
const struct ttm_place *place = &placement->busy_placement[i]; const struct ttm_place *place = &placement->busy_placement[i];
......
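
A note on the control flow above: in ttm_bo_mem_space(), -EBUSY from
ttm_bo_add_move_fence() is now a soft failure; the freshly acquired node is
released via put_node and the new continue moves on to the next candidate
placement, so an error only propagates to the caller when no candidate can be
placed. The toy below (plain C, not kernel code; try_place() and the numbers
are invented for illustration) sketches that fallback pattern.

	#include <errno.h>
	#include <stdio.h>

	/* try_place() stands in for the per-placement allocation attempt. */
	static int try_place(int i)
	{
		return (i < 2) ? -EBUSY : 0;	/* first two candidates busy */
	}

	static int find_space(int num_placements)
	{
		int ret = -ENOMEM;

		for (int i = 0; i < num_placements; ++i) {
			ret = try_place(i);
			if (ret == -EBUSY)
				continue;	/* move in flight: try next */
			if (ret)
				return ret;	/* hard error: abort */
			printf("placed in candidate %d\n", i);
			return 0;
		}
		return ret;	/* every candidate was busy (or none fit) */
	}

	int main(void)
	{
		return find_space(4) ? 1 : 0;
	}
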