Commit c5e4c6bb authored by Tom St Denis's avatar Tom St Denis Committed by Alex Deucher

drm/amd/amdgpu: Bail out of BO node creation if not enough VRAM (v3)

(v2): Return 0 and set mem->mm_node to NULL.
(v3): Use atomic64_add_return instead.
Signed-off-by: Tom St Denis <tom.stdenis@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent e9bc1bf7
...@@ -276,7 +276,7 @@ static int amdgpu_vram_mgr_new(struct ttm_mem_type_manager *man, ...@@ -276,7 +276,7 @@ static int amdgpu_vram_mgr_new(struct ttm_mem_type_manager *man,
struct drm_mm_node *nodes; struct drm_mm_node *nodes;
enum drm_mm_insert_mode mode; enum drm_mm_insert_mode mode;
unsigned long lpfn, num_nodes, pages_per_node, pages_left; unsigned long lpfn, num_nodes, pages_per_node, pages_left;
uint64_t usage = 0, vis_usage = 0; uint64_t vis_usage = 0;
unsigned i; unsigned i;
int r; int r;
...@@ -284,6 +284,13 @@ static int amdgpu_vram_mgr_new(struct ttm_mem_type_manager *man, ...@@ -284,6 +284,13 @@ static int amdgpu_vram_mgr_new(struct ttm_mem_type_manager *man,
if (!lpfn) if (!lpfn)
lpfn = man->size; lpfn = man->size;
/* bail out quickly if there's likely not enough VRAM for this BO */
if (atomic64_add_return(mem->num_pages << PAGE_SHIFT, &mgr->usage) > adev->gmc.mc_vram_size) {
atomic64_sub(mem->num_pages << PAGE_SHIFT, &mgr->usage);
mem->mm_node = NULL;
return 0;
}
if (place->flags & TTM_PL_FLAG_CONTIGUOUS) { if (place->flags & TTM_PL_FLAG_CONTIGUOUS) {
pages_per_node = ~0ul; pages_per_node = ~0ul;
num_nodes = 1; num_nodes = 1;
...@@ -300,8 +307,10 @@ static int amdgpu_vram_mgr_new(struct ttm_mem_type_manager *man, ...@@ -300,8 +307,10 @@ static int amdgpu_vram_mgr_new(struct ttm_mem_type_manager *man,
nodes = kvmalloc_array((uint32_t)num_nodes, sizeof(*nodes), nodes = kvmalloc_array((uint32_t)num_nodes, sizeof(*nodes),
GFP_KERNEL | __GFP_ZERO); GFP_KERNEL | __GFP_ZERO);
if (!nodes) if (!nodes) {
atomic64_sub(mem->num_pages << PAGE_SHIFT, &mgr->usage);
return -ENOMEM; return -ENOMEM;
}
mode = DRM_MM_INSERT_BEST; mode = DRM_MM_INSERT_BEST;
if (place->flags & TTM_PL_FLAG_TOPDOWN) if (place->flags & TTM_PL_FLAG_TOPDOWN)
...@@ -321,7 +330,6 @@ static int amdgpu_vram_mgr_new(struct ttm_mem_type_manager *man, ...@@ -321,7 +330,6 @@ static int amdgpu_vram_mgr_new(struct ttm_mem_type_manager *man,
if (unlikely(r)) if (unlikely(r))
break; break;
usage += nodes[i].size << PAGE_SHIFT;
vis_usage += amdgpu_vram_mgr_vis_size(adev, &nodes[i]); vis_usage += amdgpu_vram_mgr_vis_size(adev, &nodes[i]);
amdgpu_vram_mgr_virt_start(mem, &nodes[i]); amdgpu_vram_mgr_virt_start(mem, &nodes[i]);
pages_left -= pages; pages_left -= pages;
...@@ -341,14 +349,12 @@ static int amdgpu_vram_mgr_new(struct ttm_mem_type_manager *man, ...@@ -341,14 +349,12 @@ static int amdgpu_vram_mgr_new(struct ttm_mem_type_manager *man,
if (unlikely(r)) if (unlikely(r))
goto error; goto error;
usage += nodes[i].size << PAGE_SHIFT;
vis_usage += amdgpu_vram_mgr_vis_size(adev, &nodes[i]); vis_usage += amdgpu_vram_mgr_vis_size(adev, &nodes[i]);
amdgpu_vram_mgr_virt_start(mem, &nodes[i]); amdgpu_vram_mgr_virt_start(mem, &nodes[i]);
pages_left -= pages; pages_left -= pages;
} }
spin_unlock(&mgr->lock); spin_unlock(&mgr->lock);
atomic64_add(usage, &mgr->usage);
atomic64_add(vis_usage, &mgr->vis_usage); atomic64_add(vis_usage, &mgr->vis_usage);
mem->mm_node = nodes; mem->mm_node = nodes;
...@@ -359,6 +365,7 @@ static int amdgpu_vram_mgr_new(struct ttm_mem_type_manager *man, ...@@ -359,6 +365,7 @@ static int amdgpu_vram_mgr_new(struct ttm_mem_type_manager *man,
while (i--) while (i--)
drm_mm_remove_node(&nodes[i]); drm_mm_remove_node(&nodes[i]);
spin_unlock(&mgr->lock); spin_unlock(&mgr->lock);
atomic64_sub(mem->num_pages << PAGE_SHIFT, &mgr->usage);
kvfree(nodes); kvfree(nodes);
return r == -ENOSPC ? 0 : r; return r == -ENOSPC ? 0 : r;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment