Commit 97588b5b authored by Christian König

drm/ttm: remove pointers to globals

As the name says, global memory and BO accounting is global. So it doesn't
make much sense to have pointers to the global structures all around the code.
Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Thomas Hellström <thellstrom@vmware.com>
Link: https://patchwork.freedesktop.org/patch/332879/
parent 9165fb87
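
The whole diff applies one mechanical transformation: instead of reaching the single global TTM state through a per-device pointer (bdev->glob, glob->mem_glob), callers use the exported globals ttm_bo_glob and ttm_mem_glob directly, and the now-redundant pointer members are dropped from the structs. Below is a minimal userspace sketch of that pattern, using hypothetical stand-in types rather than the real TTM structures:

    #include <stdio.h>

    /* Hypothetical stand-ins for struct ttm_bo_global / struct ttm_bo_device. */
    struct bo_global {
            int bo_count;
    };

    static struct bo_global bo_glob;        /* single program-wide instance */

    /* Old style: every device carries a pointer to the singleton ... */
    struct bo_device {
            struct bo_global *glob;
    };

    static void release_old(struct bo_device *bdev)
    {
            bdev->glob->bo_count--;         /* extra pointer hop for no gain */
    }

    /* ... new style: reference the global directly. */
    static void release_new(void)
    {
            bo_glob.bo_count--;
    }

    int main(void)
    {
            struct bo_device bdev = { .glob = &bo_glob };

            bo_glob.bo_count = 2;
            release_old(&bdev);             /* pattern before the patch */
            release_new();                  /* pattern after the patch */
            printf("bo_count = %d\n", bo_glob.bo_count);    /* prints 0 */
            return 0;
    }

The observable behaviour is unchanged; only the extra pointer hop goes away, together with the back-pointer fields removed from the structs in the header hunks at the end of the diff.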
@@ -71,7 +71,7 @@
  */
 static int amdgpu_gart_dummy_page_init(struct amdgpu_device *adev)
 {
-        struct page *dummy_page = adev->mman.bdev.glob->dummy_read_page;
+        struct page *dummy_page = ttm_bo_glob.dummy_read_page;
 
         if (adev->dummy_page_addr)
                 return 0;
...
@@ -600,19 +600,18 @@ void amdgpu_vm_del_from_lru_notify(struct ttm_buffer_object *bo)
 void amdgpu_vm_move_to_lru_tail(struct amdgpu_device *adev,
                                 struct amdgpu_vm *vm)
 {
-        struct ttm_bo_global *glob = adev->mman.bdev.glob;
         struct amdgpu_vm_bo_base *bo_base;
 
         if (vm->bulk_moveable) {
-                spin_lock(&glob->lru_lock);
+                spin_lock(&ttm_bo_glob.lru_lock);
                 ttm_bo_bulk_move_lru_tail(&vm->lru_bulk_move);
-                spin_unlock(&glob->lru_lock);
+                spin_unlock(&ttm_bo_glob.lru_lock);
                 return;
         }
 
         memset(&vm->lru_bulk_move, 0, sizeof(vm->lru_bulk_move));
 
-        spin_lock(&glob->lru_lock);
+        spin_lock(&ttm_bo_glob.lru_lock);
         list_for_each_entry(bo_base, &vm->idle, vm_status) {
                 struct amdgpu_bo *bo = bo_base->bo;
 
@@ -624,7 +623,7 @@ void amdgpu_vm_move_to_lru_tail(struct amdgpu_device *adev,
                         ttm_bo_move_to_lru_tail(&bo->shadow->tbo,
                                                 &vm->lru_bulk_move);
         }
-        spin_unlock(&glob->lru_lock);
+        spin_unlock(&ttm_bo_glob.lru_lock);
 
         vm->bulk_moveable = true;
 }
...
@@ -1013,12 +1013,11 @@ static int drm_vram_mm_debugfs(struct seq_file *m, void *data)
         struct drm_info_node *node = (struct drm_info_node *) m->private;
         struct drm_vram_mm *vmm = node->minor->dev->vram_mm;
         struct drm_mm *mm = vmm->bdev.man[TTM_PL_VRAM].priv;
-        struct ttm_bo_global *glob = vmm->bdev.glob;
         struct drm_printer p = drm_seq_file_printer(m);
 
-        spin_lock(&glob->lru_lock);
+        spin_lock(&ttm_bo_glob.lru_lock);
         drm_mm_print(mm, &p);
-        spin_unlock(&glob->lru_lock);
+        spin_unlock(&ttm_bo_glob.lru_lock);
 
         return 0;
 }
...
@@ -429,7 +429,6 @@ void qxl_release_unmap(struct qxl_device *qdev,
 void qxl_release_fence_buffer_objects(struct qxl_release *release)
 {
         struct ttm_buffer_object *bo;
-        struct ttm_bo_global *glob;
         struct ttm_bo_device *bdev;
         struct ttm_validate_buffer *entry;
         struct qxl_device *qdev;
@@ -451,9 +450,7 @@ void qxl_release_fence_buffer_objects(struct qxl_release *release)
                        release->id | 0xf0000000, release->base.seqno);
         trace_dma_fence_emit(&release->base);
 
-        glob = bdev->glob;
-
-        spin_lock(&glob->lru_lock);
+        spin_lock(&ttm_bo_glob.lru_lock);
 
         list_for_each_entry(entry, &release->bos, head) {
                 bo = entry->bo;
@@ -462,7 +459,7 @@ void qxl_release_fence_buffer_objects(struct qxl_release *release)
                 ttm_bo_move_to_lru_tail(bo, NULL);
                 dma_resv_unlock(bo->base.resv);
         }
-        spin_unlock(&glob->lru_lock);
+        spin_unlock(&ttm_bo_glob.lru_lock);
         ww_acquire_fini(&release->ticket);
 }
@@ -319,14 +319,11 @@ static int qxl_mm_dump_table(struct seq_file *m, void *data)
 {
         struct drm_info_node *node = (struct drm_info_node *)m->private;
         struct drm_mm *mm = (struct drm_mm *)node->info_ent->data;
-        struct drm_device *dev = node->minor->dev;
-        struct qxl_device *rdev = dev->dev_private;
-        struct ttm_bo_global *glob = rdev->mman.bdev.glob;
         struct drm_printer p = drm_seq_file_printer(m);
 
-        spin_lock(&glob->lru_lock);
+        spin_lock(&ttm_bo_glob.lru_lock);
         drm_mm_print(mm, &p);
-        spin_unlock(&glob->lru_lock);
+        spin_unlock(&ttm_bo_glob.lru_lock);
         return 0;
 }
 #endif
...
@@ -51,7 +51,7 @@ struct ttm_agp_backend {
 static int ttm_agp_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
 {
         struct ttm_agp_backend *agp_be = container_of(ttm, struct ttm_agp_backend, ttm);
-        struct page *dummy_read_page = ttm->bdev->glob->dummy_read_page;
+        struct page *dummy_read_page = ttm_bo_glob.dummy_read_page;
         struct drm_mm_node *node = bo_mem->mm_node;
         struct agp_memory *mem;
         int ret, cached = (bo_mem->placement & TTM_PL_FLAG_CACHED);
...
@@ -51,6 +51,7 @@ static void ttm_bo_global_kobj_release(struct kobject *kobj);
 DEFINE_MUTEX(ttm_global_mutex);
 unsigned ttm_bo_glob_use_count;
 struct ttm_bo_global ttm_bo_glob;
+EXPORT_SYMBOL(ttm_bo_glob);
 
 static struct attribute ttm_bo_count = {
         .name = "bo_count",
@@ -148,7 +149,6 @@ static void ttm_bo_release_list(struct kref *list_kref)
 {
         struct ttm_buffer_object *bo =
                 container_of(list_kref, struct ttm_buffer_object, list_kref);
-        struct ttm_bo_device *bdev = bo->bdev;
         size_t acc_size = bo->acc_size;
 
         BUG_ON(kref_read(&bo->list_kref));
@@ -157,13 +157,13 @@ static void ttm_bo_release_list(struct kref *list_kref)
         BUG_ON(!list_empty(&bo->lru));
         BUG_ON(!list_empty(&bo->ddestroy));
         ttm_tt_destroy(bo->ttm);
-        atomic_dec(&bo->bdev->glob->bo_count);
+        atomic_dec(&ttm_bo_glob.bo_count);
         dma_fence_put(bo->moving);
         if (!ttm_bo_uses_embedded_gem_object(bo))
                 dma_resv_fini(&bo->base._resv);
         mutex_destroy(&bo->wu_mutex);
         bo->destroy(bo);
-        ttm_mem_global_free(bdev->glob->mem_glob, acc_size);
+        ttm_mem_global_free(&ttm_mem_glob, acc_size);
 }
 
 static void ttm_bo_add_mem_to_lru(struct ttm_buffer_object *bo,
@@ -187,7 +187,7 @@ static void ttm_bo_add_mem_to_lru(struct ttm_buffer_object *bo,
         if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED) && bo->ttm &&
             !(bo->ttm->page_flags & (TTM_PAGE_FLAG_SG |
                                      TTM_PAGE_FLAG_SWAPPED))) {
-                list_add_tail(&bo->swap, &bdev->glob->swap_lru[bo->priority]);
+                list_add_tail(&bo->swap, &ttm_bo_glob.swap_lru[bo->priority]);
                 kref_get(&bo->list_kref);
         }
 }
@@ -294,7 +294,7 @@ void ttm_bo_bulk_move_lru_tail(struct ttm_lru_bulk_move *bulk)
                 dma_resv_assert_held(pos->first->base.resv);
                 dma_resv_assert_held(pos->last->base.resv);
 
-                lru = &pos->first->bdev->glob->swap_lru[i];
+                lru = &ttm_bo_glob.swap_lru[i];
                 list_bulk_move_tail(lru, &pos->first->swap, &pos->last->swap);
         }
 }
@@ -458,7 +458,6 @@ static void ttm_bo_flush_all_fences(struct ttm_buffer_object *bo)
 static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
 {
         struct ttm_bo_device *bdev = bo->bdev;
-        struct ttm_bo_global *glob = bdev->glob;
         int ret;
 
         ret = ttm_bo_individualize_resv(bo);
@@ -468,16 +467,16 @@ static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
                  */
                 dma_resv_wait_timeout_rcu(bo->base.resv, true, false,
                                           30 * HZ);
-                spin_lock(&glob->lru_lock);
+                spin_lock(&ttm_bo_glob.lru_lock);
                 goto error;
         }
 
-        spin_lock(&glob->lru_lock);
+        spin_lock(&ttm_bo_glob.lru_lock);
         ret = dma_resv_trylock(bo->base.resv) ? 0 : -EBUSY;
         if (!ret) {
                 if (dma_resv_test_signaled_rcu(&bo->base._resv, true)) {
                         ttm_bo_del_from_lru(bo);
-                        spin_unlock(&glob->lru_lock);
+                        spin_unlock(&ttm_bo_glob.lru_lock);
                         if (bo->base.resv != &bo->base._resv)
                                 dma_resv_unlock(&bo->base._resv);
 
@@ -506,7 +505,7 @@ static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
 error:
         kref_get(&bo->list_kref);
         list_add_tail(&bo->ddestroy, &bdev->ddestroy);
-        spin_unlock(&glob->lru_lock);
+        spin_unlock(&ttm_bo_glob.lru_lock);
 
         schedule_delayed_work(&bdev->wq,
                               ((HZ / 100) < 1) ? 1 : HZ / 100);
@@ -529,7 +528,6 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
                                bool interruptible, bool no_wait_gpu,
                                bool unlock_resv)
 {
-        struct ttm_bo_global *glob = bo->bdev->glob;
         struct dma_resv *resv;
         int ret;
 
@@ -548,7 +546,7 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
                 if (unlock_resv)
                         dma_resv_unlock(bo->base.resv);
-                spin_unlock(&glob->lru_lock);
+                spin_unlock(&ttm_bo_glob.lru_lock);
 
                 lret = dma_resv_wait_timeout_rcu(resv, true,
                                                  interruptible,
@@ -559,7 +557,7 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
                 else if (lret == 0)
                         return -EBUSY;
 
-                spin_lock(&glob->lru_lock);
+                spin_lock(&ttm_bo_glob.lru_lock);
                 if (unlock_resv && !dma_resv_trylock(bo->base.resv)) {
                         /*
                          * We raced, and lost, someone else holds the reservation now,
@@ -569,7 +567,7 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
                          * delayed destruction would succeed, so just return success
                          * here.
                          */
-                        spin_unlock(&glob->lru_lock);
+                        spin_unlock(&ttm_bo_glob.lru_lock);
                         return 0;
                 }
                 ret = 0;
@@ -578,7 +576,7 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
         if (ret || unlikely(list_empty(&bo->ddestroy))) {
                 if (unlock_resv)
                         dma_resv_unlock(bo->base.resv);
-                spin_unlock(&glob->lru_lock);
+                spin_unlock(&ttm_bo_glob.lru_lock);
                 return ret;
         }
 
@@ -586,7 +584,7 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
         list_del_init(&bo->ddestroy);
         kref_put(&bo->list_kref, ttm_bo_ref_bug);
-        spin_unlock(&glob->lru_lock);
+        spin_unlock(&ttm_bo_glob.lru_lock);
         ttm_bo_cleanup_memtype_use(bo);
 
         if (unlock_resv)
@@ -601,7 +599,7 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
  */
 static bool ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all)
 {
-        struct ttm_bo_global *glob = bdev->glob;
+        struct ttm_bo_global *glob = &ttm_bo_glob;
         struct list_head removed;
         bool empty;
@@ -825,13 +823,12 @@ static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
                                struct ww_acquire_ctx *ticket)
 {
         struct ttm_buffer_object *bo = NULL, *busy_bo = NULL;
-        struct ttm_bo_global *glob = bdev->glob;
         struct ttm_mem_type_manager *man = &bdev->man[mem_type];
         bool locked = false;
         unsigned i;
         int ret;
 
-        spin_lock(&glob->lru_lock);
+        spin_lock(&ttm_bo_glob.lru_lock);
         for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
                 list_for_each_entry(bo, &man->lru[i], lru) {
                         bool busy;
@@ -863,7 +860,7 @@ static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
         if (!bo) {
                 if (busy_bo)
                         kref_get(&busy_bo->list_kref);
-                spin_unlock(&glob->lru_lock);
+                spin_unlock(&ttm_bo_glob.lru_lock);
                 ret = ttm_mem_evict_wait_busy(busy_bo, ctx, ticket);
                 if (busy_bo)
                         kref_put(&busy_bo->list_kref, ttm_bo_release_list);
@@ -879,7 +876,7 @@ static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
                 return ret;
         }
 
-        spin_unlock(&glob->lru_lock);
+        spin_unlock(&ttm_bo_glob.lru_lock);
 
         ret = ttm_bo_evict(bo, ctx);
         if (locked)
@@ -1045,10 +1042,10 @@ static int ttm_bo_mem_placement(struct ttm_buffer_object *bo,
         mem->mem_type = mem_type;
         mem->placement = cur_flags;
 
-        spin_lock(&bo->bdev->glob->lru_lock);
+        spin_lock(&ttm_bo_glob.lru_lock);
         ttm_bo_del_from_lru(bo);
         ttm_bo_add_mem_to_lru(bo, mem);
-        spin_unlock(&bo->bdev->glob->lru_lock);
+        spin_unlock(&ttm_bo_glob.lru_lock);
 
         return 0;
 }
@@ -1135,9 +1132,9 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
 
 error:
         if (bo->mem.mem_type == TTM_PL_SYSTEM && !list_empty(&bo->lru)) {
-                spin_lock(&bo->bdev->glob->lru_lock);
+                spin_lock(&ttm_bo_glob.lru_lock);
                 ttm_bo_move_to_lru_tail(bo, NULL);
-                spin_unlock(&bo->bdev->glob->lru_lock);
+                spin_unlock(&ttm_bo_glob.lru_lock);
         }
 
         return ret;
@@ -1261,9 +1258,9 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
                          struct dma_resv *resv,
                          void (*destroy) (struct ttm_buffer_object *))
 {
+        struct ttm_mem_global *mem_glob = &ttm_mem_glob;
         int ret = 0;
         unsigned long num_pages;
-        struct ttm_mem_global *mem_glob = bdev->glob->mem_glob;
         bool locked;
 
         ret = ttm_mem_global_alloc(mem_glob, acc_size, ctx);
@@ -1323,7 +1320,7 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
                 dma_resv_init(&bo->base._resv);
                 drm_vma_node_reset(&bo->base.vma_node);
         }
-        atomic_inc(&bo->bdev->glob->bo_count);
+        atomic_inc(&ttm_bo_glob.bo_count);
 
         /*
          * For ttm_bo_type_device buffers, allocate
@@ -1353,9 +1350,9 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
                 return ret;
         }
 
-        spin_lock(&bdev->glob->lru_lock);
+        spin_lock(&ttm_bo_glob.lru_lock);
         ttm_bo_move_to_lru_tail(bo, NULL);
-        spin_unlock(&bdev->glob->lru_lock);
+        spin_unlock(&ttm_bo_glob.lru_lock);
 
         return ret;
 }
@@ -1453,7 +1450,7 @@ static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
                 .flags = TTM_OPT_FLAG_FORCE_ALLOC
         };
         struct ttm_mem_type_manager *man = &bdev->man[mem_type];
-        struct ttm_bo_global *glob = bdev->glob;
+        struct ttm_bo_global *glob = &ttm_bo_glob;
         struct dma_fence *fence;
         int ret;
         unsigned i;
@@ -1622,8 +1619,6 @@ static int ttm_bo_global_init(void)
                 goto out;
 
         spin_lock_init(&glob->lru_lock);
-        glob->mem_glob = &ttm_mem_glob;
-        glob->mem_glob->bo_glob = glob;
         glob->dummy_read_page = alloc_page(__GFP_ZERO | GFP_DMA32);
 
         if (unlikely(glob->dummy_read_page == NULL)) {
@@ -1647,10 +1642,10 @@ static int ttm_bo_global_init(void)
 
 int ttm_bo_device_release(struct ttm_bo_device *bdev)
 {
+        struct ttm_bo_global *glob = &ttm_bo_glob;
         int ret = 0;
         unsigned i = TTM_NUM_MEM_TYPES;
         struct ttm_mem_type_manager *man;
-        struct ttm_bo_global *glob = bdev->glob;
 
         while (i--) {
                 man = &bdev->man[i];
@@ -1719,7 +1714,6 @@ int ttm_bo_device_init(struct ttm_bo_device *bdev,
         INIT_DELAYED_WORK(&bdev->wq, ttm_bo_delayed_workqueue);
         INIT_LIST_HEAD(&bdev->ddestroy);
         bdev->dev_mapping = mapping;
-        bdev->glob = glob;
         bdev->need_dma32 = need_dma32;
         mutex_lock(&ttm_global_mutex);
         list_add_tail(&bdev->device_list, &glob->device_list);
@@ -1898,8 +1892,7 @@ void ttm_bo_swapout_all(struct ttm_bo_device *bdev)
                 .no_wait_gpu = false
         };
 
-        while (ttm_bo_swapout(bdev->glob, &ctx) == 0)
-                ;
+        while (ttm_bo_swapout(&ttm_bo_glob, &ctx) == 0);
 }
 EXPORT_SYMBOL(ttm_bo_swapout_all);
...
@@ -503,7 +503,7 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
          * TODO: Explicit member copy would probably be better here.
          */
 
-        atomic_inc(&bo->bdev->glob->bo_count);
+        atomic_inc(&ttm_bo_glob.bo_count);
         INIT_LIST_HEAD(&fbo->base.ddestroy);
         INIT_LIST_HEAD(&fbo->base.lru);
         INIT_LIST_HEAD(&fbo->base.swap);
...
@@ -177,9 +177,9 @@ static vm_fault_t ttm_bo_vm_fault(struct vm_fault *vmf)
                 }
 
                 if (bo->moving != moving) {
-                        spin_lock(&bdev->glob->lru_lock);
+                        spin_lock(&ttm_bo_glob.lru_lock);
                         ttm_bo_move_to_lru_tail(bo, NULL);
-                        spin_unlock(&bdev->glob->lru_lock);
+                        spin_unlock(&ttm_bo_glob.lru_lock);
                 }
                 dma_fence_put(moving);
         }
...
@@ -47,22 +47,18 @@ void ttm_eu_backoff_reservation(struct ww_acquire_ctx *ticket,
                                 struct list_head *list)
 {
         struct ttm_validate_buffer *entry;
-        struct ttm_bo_global *glob;
 
         if (list_empty(list))
                 return;
 
-        entry = list_first_entry(list, struct ttm_validate_buffer, head);
-        glob = entry->bo->bdev->glob;
-
-        spin_lock(&glob->lru_lock);
+        spin_lock(&ttm_bo_glob.lru_lock);
         list_for_each_entry(entry, list, head) {
                 struct ttm_buffer_object *bo = entry->bo;
 
                 ttm_bo_move_to_lru_tail(bo, NULL);
                 dma_resv_unlock(bo->base.resv);
         }
-        spin_unlock(&glob->lru_lock);
+        spin_unlock(&ttm_bo_glob.lru_lock);
 
         if (ticket)
                 ww_acquire_fini(ticket);
@@ -85,16 +81,12 @@ int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
                            struct list_head *list, bool intr,
                            struct list_head *dups)
 {
-        struct ttm_bo_global *glob;
         struct ttm_validate_buffer *entry;
         int ret;
 
         if (list_empty(list))
                 return 0;
 
-        entry = list_first_entry(list, struct ttm_validate_buffer, head);
-        glob = entry->bo->bdev->glob;
-
         if (ticket)
                 ww_acquire_init(ticket, &reservation_ww_class);
@@ -166,19 +158,14 @@ void ttm_eu_fence_buffer_objects(struct ww_acquire_ctx *ticket,
                                  struct dma_fence *fence)
 {
         struct ttm_validate_buffer *entry;
-        struct ttm_buffer_object *bo;
-        struct ttm_bo_global *glob;
 
         if (list_empty(list))
                 return;
 
-        bo = list_first_entry(list, struct ttm_validate_buffer, head)->bo;
-        glob = bo->bdev->glob;
-
-        spin_lock(&glob->lru_lock);
+        spin_lock(&ttm_bo_glob.lru_lock);
         list_for_each_entry(entry, list, head) {
-                bo = entry->bo;
+                struct ttm_buffer_object *bo = entry->bo;
 
                 if (entry->num_shared)
                         dma_resv_add_shared_fence(bo->base.resv, fence);
                 else
@@ -186,7 +173,7 @@ void ttm_eu_fence_buffer_objects(struct ww_acquire_ctx *ticket,
                 ttm_bo_move_to_lru_tail(bo, NULL);
                 dma_resv_unlock(bo->base.resv);
         }
-        spin_unlock(&glob->lru_lock);
+        spin_unlock(&ttm_bo_glob.lru_lock);
 
         if (ticket)
                 ww_acquire_fini(ticket);
 }
...
@@ -275,7 +275,7 @@ static void ttm_shrink(struct ttm_mem_global *glob, bool from_wq,
 
         while (ttm_zones_above_swap_target(glob, from_wq, extra)) {
                 spin_unlock(&glob->lock);
-                ret = ttm_bo_swapout(glob->bo_glob, ctx);
+                ret = ttm_bo_swapout(&ttm_bo_glob, ctx);
                 spin_lock(&glob->lock);
                 if (unlikely(ret != 0))
                         break;
...
@@ -1028,7 +1028,7 @@ void ttm_page_alloc_fini(void)
 static void
 ttm_pool_unpopulate_helper(struct ttm_tt *ttm, unsigned mem_count_update)
 {
-        struct ttm_mem_global *mem_glob = ttm->bdev->glob->mem_glob;
+        struct ttm_mem_global *mem_glob = &ttm_mem_glob;
         unsigned i;
 
         if (mem_count_update == 0)
@@ -1049,7 +1049,7 @@ ttm_pool_unpopulate_helper(struct ttm_tt *ttm, unsigned mem_count_update)
 
 int ttm_pool_populate(struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
 {
-        struct ttm_mem_global *mem_glob = ttm->bdev->glob->mem_glob;
+        struct ttm_mem_global *mem_glob = &ttm_mem_glob;
         unsigned i;
         int ret;
 
...
@@ -886,8 +886,8 @@ static gfp_t ttm_dma_pool_gfp_flags(struct ttm_dma_tt *ttm_dma, bool huge)
 int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev,
                      struct ttm_operation_ctx *ctx)
 {
+        struct ttm_mem_global *mem_glob = &ttm_mem_glob;
         struct ttm_tt *ttm = &ttm_dma->ttm;
-        struct ttm_mem_global *mem_glob = ttm->bdev->glob->mem_glob;
         unsigned long num_pages = ttm->num_pages;
         struct dma_pool *pool;
         struct dma_page *d_page;
@@ -991,8 +991,8 @@ EXPORT_SYMBOL_GPL(ttm_dma_populate);
 /* Put all pages in pages list to correct pool to wait for reuse */
 void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev)
 {
+        struct ttm_mem_global *mem_glob = &ttm_mem_glob;
         struct ttm_tt *ttm = &ttm_dma->ttm;
-        struct ttm_mem_global *mem_glob = ttm->bdev->glob->mem_glob;
         struct dma_pool *pool;
         struct dma_page *d_page, *next;
         enum pool_type type;
...
@@ -423,7 +423,6 @@ extern struct ttm_bo_global {
          */
 
         struct kobject kobj;
-        struct ttm_mem_global *mem_glob;
         struct page *dummy_read_page;
         spinlock_t lru_lock;
 
@@ -467,7 +466,6 @@ struct ttm_bo_device {
          * Constant after bo device init / atomic.
          */
         struct list_head device_list;
-        struct ttm_bo_global *glob;
         struct ttm_bo_driver *driver;
         struct ttm_mem_type_manager man[TTM_NUM_MEM_TYPES];
 
@@ -768,9 +766,9 @@ static inline int ttm_bo_reserve_slowpath(struct ttm_buffer_object *bo,
  */
 static inline void ttm_bo_unreserve(struct ttm_buffer_object *bo)
 {
-        spin_lock(&bo->bdev->glob->lru_lock);
+        spin_lock(&ttm_bo_glob.lru_lock);
         ttm_bo_move_to_lru_tail(bo, NULL);
-        spin_unlock(&bo->bdev->glob->lru_lock);
+        spin_unlock(&ttm_bo_glob.lru_lock);
         dma_resv_unlock(bo->base.resv);
 }
...
@@ -65,7 +65,6 @@
 struct ttm_mem_zone;
 extern struct ttm_mem_global {
         struct kobject kobj;
-        struct ttm_bo_global *bo_glob;
         struct workqueue_struct *swap_queue;
         struct work_struct work;
         spinlock_t lock;
...