Commit 267501ec authored by Christian König

drm/amdgpu: switch the VRAM backend to self alloc

Similar to the TTM range manager.
Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20210602100914.46246-7-christian.koenig@amd.com
parent f700b18c
...@@ -23,6 +23,8 @@ ...@@ -23,6 +23,8 @@
*/ */
#include <linux/dma-mapping.h> #include <linux/dma-mapping.h>
#include <drm/ttm/ttm_range_manager.h>
#include "amdgpu.h" #include "amdgpu.h"
#include "amdgpu_vm.h" #include "amdgpu_vm.h"
#include "amdgpu_res_cursor.h" #include "amdgpu_res_cursor.h"
...@@ -371,9 +373,9 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man, ...@@ -371,9 +373,9 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man,
struct amdgpu_vram_mgr *mgr = to_vram_mgr(man); struct amdgpu_vram_mgr *mgr = to_vram_mgr(man);
struct amdgpu_device *adev = to_amdgpu_device(mgr); struct amdgpu_device *adev = to_amdgpu_device(mgr);
uint64_t vis_usage = 0, mem_bytes, max_bytes; uint64_t vis_usage = 0, mem_bytes, max_bytes;
struct ttm_range_mgr_node *node;
struct drm_mm *mm = &mgr->mm; struct drm_mm *mm = &mgr->mm;
enum drm_mm_insert_mode mode; enum drm_mm_insert_mode mode;
struct drm_mm_node *nodes;
unsigned i; unsigned i;
int r; int r;
...@@ -388,8 +390,8 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man, ...@@ -388,8 +390,8 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man,
/* bail out quickly if there's likely not enough VRAM for this BO */ /* bail out quickly if there's likely not enough VRAM for this BO */
mem_bytes = (u64)mem->num_pages << PAGE_SHIFT; mem_bytes = (u64)mem->num_pages << PAGE_SHIFT;
if (atomic64_add_return(mem_bytes, &mgr->usage) > max_bytes) { if (atomic64_add_return(mem_bytes, &mgr->usage) > max_bytes) {
atomic64_sub(mem_bytes, &mgr->usage); r = -ENOSPC;
return -ENOSPC; goto error_sub;
} }
if (place->flags & TTM_PL_FLAG_CONTIGUOUS) { if (place->flags & TTM_PL_FLAG_CONTIGUOUS) {
...@@ -407,13 +409,15 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man, ...@@ -407,13 +409,15 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man,
num_nodes = DIV_ROUND_UP(mem->num_pages, pages_per_node); num_nodes = DIV_ROUND_UP(mem->num_pages, pages_per_node);
} }
nodes = kvmalloc_array((uint32_t)num_nodes, sizeof(*nodes), node = kvmalloc(struct_size(node, mm_nodes, num_nodes),
GFP_KERNEL | __GFP_ZERO); GFP_KERNEL | __GFP_ZERO);
if (!nodes) { if (!node) {
atomic64_sub(mem_bytes, &mgr->usage); r = -ENOMEM;
return -ENOMEM; goto error_sub;
} }
ttm_resource_init(tbo, place, &node->base);
mode = DRM_MM_INSERT_BEST; mode = DRM_MM_INSERT_BEST;
if (place->flags & TTM_PL_FLAG_TOPDOWN) if (place->flags & TTM_PL_FLAG_TOPDOWN)
mode = DRM_MM_INSERT_HIGH; mode = DRM_MM_INSERT_HIGH;
...@@ -432,8 +436,9 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man, ...@@ -432,8 +436,9 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man,
if (pages >= pages_per_node) if (pages >= pages_per_node)
alignment = pages_per_node; alignment = pages_per_node;
r = drm_mm_insert_node_in_range(mm, &nodes[i], pages, alignment, r = drm_mm_insert_node_in_range(mm, &node->mm_nodes[i], pages,
0, place->fpfn, lpfn, mode); alignment, 0, place->fpfn,
lpfn, mode);
if (unlikely(r)) { if (unlikely(r)) {
if (pages > pages_per_node) { if (pages > pages_per_node) {
if (is_power_of_2(pages)) if (is_power_of_2(pages))
...@@ -442,11 +447,11 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man, ...@@ -442,11 +447,11 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man,
pages = rounddown_pow_of_two(pages); pages = rounddown_pow_of_two(pages);
continue; continue;
} }
goto error; goto error_free;
} }
vis_usage += amdgpu_vram_mgr_vis_size(adev, &nodes[i]); vis_usage += amdgpu_vram_mgr_vis_size(adev, &node->mm_nodes[i]);
amdgpu_vram_mgr_virt_start(mem, &nodes[i]); amdgpu_vram_mgr_virt_start(mem, &node->mm_nodes[i]);
pages_left -= pages; pages_left -= pages;
++i; ++i;
...@@ -459,16 +464,17 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man, ...@@ -459,16 +464,17 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man,
mem->placement |= TTM_PL_FLAG_CONTIGUOUS; mem->placement |= TTM_PL_FLAG_CONTIGUOUS;
atomic64_add(vis_usage, &mgr->vis_usage); atomic64_add(vis_usage, &mgr->vis_usage);
mem->mm_node = nodes; mem->mm_node = &node->mm_nodes[0];
return 0; return 0;
error: error_free:
while (i--) while (i--)
drm_mm_remove_node(&nodes[i]); drm_mm_remove_node(&node->mm_nodes[i]);
spin_unlock(&mgr->lock); spin_unlock(&mgr->lock);
atomic64_sub(mem->num_pages << PAGE_SHIFT, &mgr->usage); kvfree(node);
kvfree(nodes); error_sub:
atomic64_sub(mem_bytes, &mgr->usage);
return r; return r;
} }
...@@ -485,13 +491,17 @@ static void amdgpu_vram_mgr_del(struct ttm_resource_manager *man, ...@@ -485,13 +491,17 @@ static void amdgpu_vram_mgr_del(struct ttm_resource_manager *man,
{ {
struct amdgpu_vram_mgr *mgr = to_vram_mgr(man); struct amdgpu_vram_mgr *mgr = to_vram_mgr(man);
struct amdgpu_device *adev = to_amdgpu_device(mgr); struct amdgpu_device *adev = to_amdgpu_device(mgr);
struct drm_mm_node *nodes = mem->mm_node; struct ttm_range_mgr_node *node;
uint64_t usage = 0, vis_usage = 0; uint64_t usage = 0, vis_usage = 0;
unsigned pages = mem->num_pages; unsigned pages = mem->num_pages;
struct drm_mm_node *nodes;
if (!mem->mm_node) if (!mem->mm_node)
return; return;
node = to_ttm_range_mgr_node(mem);
nodes = &node->mm_nodes[0];
spin_lock(&mgr->lock); spin_lock(&mgr->lock);
while (pages) { while (pages) {
pages -= nodes->size; pages -= nodes->size;
...@@ -506,8 +516,7 @@ static void amdgpu_vram_mgr_del(struct ttm_resource_manager *man, ...@@ -506,8 +516,7 @@ static void amdgpu_vram_mgr_del(struct ttm_resource_manager *man,
atomic64_sub(usage, &mgr->usage); atomic64_sub(usage, &mgr->usage);
atomic64_sub(vis_usage, &mgr->vis_usage); atomic64_sub(vis_usage, &mgr->vis_usage);
kvfree(mem->mm_node); kvfree(node);
mem->mm_node = NULL;
} }
/** /**
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment