Commit f700b18c authored by Christian König

drm/amdgpu: switch the GTT backend to self alloc

Similar to the TTM range manager.
Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20210602100914.46246-6-christian.koenig@amd.com
parent 2fdcb55d
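
The patch replaces the driver-private drm_mm_node with an embedded struct ttm_range_mgr_node, which ends in a flexible array of drm_mm_nodes; the backend then allocates the whole object itself with struct_size() and recovers the wrapper from a resource pointer via container_of(). Below is a minimal userspace sketch of that allocation pattern; the struct members are simplified stand-ins, not the real TTM/amdgpu definitions.

/* Minimal sketch of the embed-and-container_of pattern (simplified types). */
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* simplified stand-in for the kernel's struct_size() helper,
 * which additionally checks for arithmetic overflow */
#define struct_size(p, member, n) \
	(sizeof(*(p)) + (n) * sizeof((p)->member[0]))

struct drm_mm_node {			/* stand-in: just start/size here */
	unsigned long start, size;
};

struct ttm_range_mgr_node {		/* stand-in: ends in a flexible array */
	int resource;			/* placeholder for struct ttm_resource */
	struct drm_mm_node mm_nodes[];
};

/* 'base' must stay the last member so its flexible array can grow;
 * nesting a flexible-array struct like this is a GNU C extension
 * that the kernel relies on. */
struct amdgpu_gtt_node {
	void *tbo;
	struct ttm_range_mgr_node base;
};

int main(void)
{
	struct amdgpu_gtt_node *node;
	struct drm_mm_node *mm;

	/* one allocation covers the wrapper plus one range node */
	node = calloc(1, struct_size(node, base.mm_nodes, 1));
	if (!node)
		return 1;

	node->base.mm_nodes[0].start = 0;
	node->base.mm_nodes[0].size = 16;

	/* given only the embedded mm_node, recover the wrapper again */
	mm = &node->base.mm_nodes[0];
	printf("round trip ok: %d\n",
	       container_of(mm, struct amdgpu_gtt_node,
			    base.mm_nodes[0]) == node);

	free(node);
	return 0;
}
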
@@ -22,11 +22,13 @@
  * Authors: Christian König
  */
 
+#include <drm/ttm/ttm_range_manager.h>
+
 #include "amdgpu.h"
 
 struct amdgpu_gtt_node {
-	struct drm_mm_node node;
 	struct ttm_buffer_object *tbo;
+	struct ttm_range_mgr_node base;
 };
 
 static inline struct amdgpu_gtt_mgr *
@@ -38,7 +40,8 @@ to_gtt_mgr(struct ttm_resource_manager *man)
 static inline struct amdgpu_gtt_node *
 to_amdgpu_gtt_node(struct ttm_resource *res)
 {
-	return container_of(res->mm_node, struct amdgpu_gtt_node, node);
+	return container_of(res->mm_node, struct amdgpu_gtt_node,
+			    base.mm_nodes[0]);
 }
 
 /**
@@ -107,7 +110,7 @@ bool amdgpu_gtt_mgr_has_gart_addr(struct ttm_resource *mem)
 {
 	struct amdgpu_gtt_node *node = to_amdgpu_gtt_node(mem);
 
-	return drm_mm_node_allocated(&node->node);
+	return drm_mm_node_allocated(&node->base.mm_nodes[0]);
 }
 
 /**
@@ -138,16 +141,19 @@ static int amdgpu_gtt_mgr_new(struct ttm_resource_manager *man,
 	atomic64_sub(mem->num_pages, &mgr->available);
 	spin_unlock(&mgr->lock);
 
-	node = kzalloc(sizeof(*node), GFP_KERNEL);
+	node = kzalloc(struct_size(node, base.mm_nodes, 1), GFP_KERNEL);
 	if (!node) {
 		r = -ENOMEM;
 		goto err_out;
 	}
 
 	node->tbo = tbo;
+	ttm_resource_init(tbo, place, &node->base.base);
+
 	if (place->lpfn) {
 		spin_lock(&mgr->lock);
-		r = drm_mm_insert_node_in_range(&mgr->mm, &node->node,
+		r = drm_mm_insert_node_in_range(&mgr->mm,
+						&node->base.mm_nodes[0],
 						mem->num_pages,
 						tbo->page_alignment, 0,
 						place->fpfn, place->lpfn,
@@ -156,14 +162,14 @@ static int amdgpu_gtt_mgr_new(struct ttm_resource_manager *man,
 		if (unlikely(r))
 			goto err_free;
 
-		mem->start = node->node.start;
+		mem->start = node->base.mm_nodes[0].start;
 	} else {
-		node->node.start = 0;
-		node->node.size = mem->num_pages;
+		node->base.mm_nodes[0].start = 0;
+		node->base.mm_nodes[0].size = mem->num_pages;
 		mem->start = AMDGPU_BO_INVALID_OFFSET;
 	}
 
-	mem->mm_node = &node->node;
+	mem->mm_node = &node->base.mm_nodes[0];
 	return 0;
 
 err_free:
@@ -186,15 +192,17 @@ static int amdgpu_gtt_mgr_new(struct ttm_resource_manager *man,
 static void amdgpu_gtt_mgr_del(struct ttm_resource_manager *man,
 			       struct ttm_resource *mem)
 {
-	struct amdgpu_gtt_node *node = to_amdgpu_gtt_node(mem);
 	struct amdgpu_gtt_mgr *mgr = to_gtt_mgr(man);
+	struct amdgpu_gtt_node *node;
 
-	if (!node)
+	if (!mem->mm_node)
 		return;
 
+	node = to_amdgpu_gtt_node(mem);
+
 	spin_lock(&mgr->lock);
-	if (drm_mm_node_allocated(&node->node))
-		drm_mm_remove_node(&node->node);
+	if (drm_mm_node_allocated(&node->base.mm_nodes[0]))
+		drm_mm_remove_node(&node->base.mm_nodes[0]);
 	spin_unlock(&mgr->lock);
 	atomic64_add(mem->num_pages, &mgr->available);
 
@@ -232,7 +240,7 @@ int amdgpu_gtt_mgr_recover(struct ttm_resource_manager *man)
 
 	spin_lock(&mgr->lock);
 	drm_mm_for_each_node(mm_node, &mgr->mm) {
-		node = container_of(mm_node, struct amdgpu_gtt_node, node);
+		node = container_of(mm_node, typeof(*node), base.mm_nodes[0]);
 		r = amdgpu_ttm_recover_gart(node->tbo);
 		if (r)
 			break;
...
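
One subtlety the amdgpu_gtt_mgr_del() hunk addresses: in the old layout the drm_mm_node was the first member, so container_of() on a NULL res->mm_node still compared equal to NULL and the old "if (!node)" check worked. With the node now embedded at a nonzero offset, the NULL check must happen on mem->mm_node before calling to_amdgpu_gtt_node(), which is exactly the reorder in the hunk. A small illustrative sketch (toy types, and note that pointer arithmetic on NULL is formally undefined; this only shows the practical hazard):

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct inner { int x; };

struct at_zero    { struct inner member; long pad; };	/* old-style layout */
struct at_nonzero { long pad; struct inner member; };	/* new-style layout */

int main(void)
{
	struct inner *p = NULL;

	/* member at offset 0: result happens to compare equal to NULL */
	printf("%d\n", container_of(p, struct at_zero, member) == NULL);
	/* member at nonzero offset: result is a bogus non-NULL pointer,
	 * so a NULL check after container_of() never fires */
	printf("%d\n", container_of(p, struct at_nonzero, member) == NULL);
	return 0;
}
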