Commit 94214635 authored by Christian König, committed by Alex Deucher

drm/radeon: fence BO_VAs manually

This allows us to finally remove the VM fence and
so allows the VM to be used concurrently from different engines.
Signed-off-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent 7c42bc1a
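
The gist of the change: instead of one fence on the VM that every command submission syncs to, each mapping (radeon_bo_va) keeps a reference to the fence of its own last page-table update. Below is a minimal stand-alone model of that bookkeeping, not radeon code; only the last_pt_update name and the ref/unref pattern come from the patch, all other types and helpers are simplified stand-ins for illustration.

/*
 * Minimal stand-alone model of the bookkeeping this patch introduces: each
 * mapping keeps a reference to the fence of its own last page-table update
 * instead of the VM keeping one global fence.  Only the last_pt_update name
 * and the ref/unref pattern come from the patch; everything else here is a
 * simplified stand-in, not radeon code.
 */
#include <stdio.h>
#include <stdlib.h>

struct fence {
	int refcount;
	unsigned int seq;	/* which page-table update this fence signals */
};

struct bo_va {
	struct fence *last_pt_update;	/* fence of the last PT update, or NULL */
};

static struct fence *fence_ref(struct fence *f)
{
	if (f)
		f->refcount++;
	return f;
}

static void fence_unref(struct fence **f)
{
	if (*f && --(*f)->refcount == 0)
		free(*f);
	*f = NULL;
}

/* Mirrors the radeon_vm_bo_update() hunk: drop the old fence, pin the new one. */
static void bo_va_set_pt_fence(struct bo_va *bo_va, struct fence *update)
{
	fence_unref(&bo_va->last_pt_update);
	bo_va->last_pt_update = fence_ref(update);
}

/* Mirrors the clear_freed/bo_rmv/vm_fini hunks: drop the reference on free. */
static void bo_va_destroy(struct bo_va *bo_va)
{
	fence_unref(&bo_va->last_pt_update);
	free(bo_va);
}

int main(void)
{
	struct bo_va *bo_va = calloc(1, sizeof(*bo_va));
	struct fence *f = calloc(1, sizeof(*f));

	f->refcount = 1;		/* reference held by the submitter */
	f->seq = 1;

	bo_va_set_pt_fence(bo_va, f);	/* the mapping now pins its update fence */
	fence_unref(&f);		/* the submitter drops its reference */

	printf("last PT update: seq %u\n", bo_va->last_pt_update->seq);

	bo_va_destroy(bo_va);		/* releases the final reference */
	return 0;
}
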
@@ -456,6 +456,7 @@ struct radeon_bo_va {
 	struct list_head		bo_list;
 	uint32_t			flags;
 	uint64_t			addr;
+	struct radeon_fence		*last_pt_update;
 	unsigned			ref_count;

 	/* protected by vm mutex */
@@ -915,6 +916,8 @@ struct radeon_vm_id {
 };

 struct radeon_vm {
+	struct mutex		mutex;
+
 	struct rb_root		va;

 	/* BOs moved, but not yet updated in the PT */
@@ -932,10 +935,6 @@ struct radeon_vm {
 	struct radeon_bo_va	*ib_bo_va;

-	struct mutex		mutex;
-	/* last fence for cs using this vm */
-	struct radeon_fence	*fence;
-
 	/* for id and flush management per ring */
 	struct radeon_vm_id	ids[RADEON_NUM_RINGS];
 };
...
@@ -505,6 +505,9 @@ static int radeon_bo_vm_update_pte(struct radeon_cs_parser *p,
 	if (r)
 		return r;

+	radeon_sync_resv(p->rdev, &p->ib.sync, vm->page_directory->tbo.resv,
+			 true);
+
 	r = radeon_vm_clear_freed(rdev, vm);
 	if (r)
 		return r;
@@ -536,6 +539,8 @@ static int radeon_bo_vm_update_pte(struct radeon_cs_parser *p,
 		r = radeon_vm_bo_update(rdev, bo_va, &bo->tbo.mem);
 		if (r)
 			return r;
+
+		radeon_sync_fence(&p->ib.sync, bo_va->last_pt_update);
 	}

 	return radeon_vm_clear_invalids(rdev, vm);
@@ -580,7 +585,6 @@ static int radeon_cs_ib_vm_chunk(struct radeon_device *rdev,
 		DRM_ERROR("Failed to sync rings: %i\n", r);
 		goto out;
 	}
-	radeon_sync_fence(&parser->ib.sync, vm->fence);

 	if ((rdev->family >= CHIP_TAHITI) &&
 	    (parser->chunk_const_ib_idx != -1)) {
...
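
In the command-submission hunks above (radeon_bo_vm_update_pte / radeon_cs_ib_vm_chunk), a submission no longer syncs to vm->fence; it syncs to the page directory's reservation object and to last_pt_update of every mapping it actually uses. A rough, self-contained sketch of that dependency-gathering rule follows; every name in it is an illustrative stand-in, not the radeon_sync API.

/*
 * Rough sketch of the dependency rule the CS path switches to: before
 * running, a submission gathers the last page-table-update fence of every
 * mapping it uses (the page directory itself is covered by its reservation
 * object).  Names here are illustrative stand-ins, not radeon API.
 */
#define MAX_PT_DEPS 64

struct pt_fence;			/* opaque "page tables updated" event */

struct mapping {
	struct pt_fence *last_pt_update;	/* NULL if never updated */
};

struct pt_dep_list {
	const struct pt_fence *deps[MAX_PT_DEPS];
	unsigned int count;
};

/* Collect what a submission must wait for: one fence per mapping it uses,
 * instead of a single fence covering everything ever done in the VM. */
static int gather_pt_deps(struct pt_dep_list *out,
			  const struct mapping *maps, unsigned int n)
{
	unsigned int i;

	for (i = 0; i < n; ++i) {
		if (!maps[i].last_pt_update)
			continue;	/* no pending page-table update */
		if (out->count >= MAX_PT_DEPS)
			return -1;	/* toy fixed-size list is full */
		out->deps[out->count++] = maps[i].last_pt_update;
	}
	return 0;
}

Unrelated work in the same VM, including submissions from other engines, no longer shows up as a dependency, which is what enables the concurrent use mentioned in the commit message.
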
@@ -275,9 +275,6 @@ void radeon_vm_fence(struct radeon_device *rdev,
 {
 	unsigned vm_id = vm->ids[fence->ring].id;

-	radeon_fence_unref(&vm->fence);
-	vm->fence = radeon_fence_ref(fence);
-
 	radeon_fence_unref(&rdev->vm_manager.active[vm_id]);
 	rdev->vm_manager.active[vm_id] = radeon_fence_ref(fence);
@@ -707,8 +704,6 @@ int radeon_vm_update_page_directory(struct radeon_device *rdev,
 		}
 		ib.fence->is_vm_update = true;
 		radeon_bo_fence(pd, ib.fence, false);
-		radeon_fence_unref(&vm->fence);
-		vm->fence = radeon_fence_ref(ib.fence);
 	}

 	radeon_ib_free(rdev, &ib);
@@ -999,8 +994,8 @@ int radeon_vm_bo_update(struct radeon_device *rdev,
 	}
 	ib.fence->is_vm_update = true;
 	radeon_vm_fence_pts(vm, bo_va->it.start, bo_va->it.last + 1, ib.fence);
-	radeon_fence_unref(&vm->fence);
-	vm->fence = radeon_fence_ref(ib.fence);
+	radeon_fence_unref(&bo_va->last_pt_update);
+	bo_va->last_pt_update = radeon_fence_ref(ib.fence);
 	radeon_ib_free(rdev, &ib);

 	return 0;
@@ -1026,6 +1021,7 @@ int radeon_vm_clear_freed(struct radeon_device *rdev,
 	list_for_each_entry_safe(bo_va, tmp, &vm->freed, vm_status) {
 		r = radeon_vm_bo_update(rdev, bo_va, NULL);
 		radeon_bo_unref(&bo_va->bo);
+		radeon_fence_unref(&bo_va->last_pt_update);
 		kfree(bo_va);
 		if (r)
 			return r;
@@ -1084,6 +1080,7 @@ void radeon_vm_bo_rmv(struct radeon_device *rdev,
 		bo_va->bo = radeon_bo_ref(bo_va->bo);
 		list_add(&bo_va->vm_status, &vm->freed);
 	} else {
+		radeon_fence_unref(&bo_va->last_pt_update);
 		kfree(bo_va);
 	}
@@ -1130,8 +1127,6 @@ int radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm)
 	int i, r;

 	vm->ib_bo_va = NULL;
-	vm->fence = NULL;
-
 	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
 		vm->ids[i].id = 0;
 		vm->ids[i].flushed_updates = NULL;
@@ -1192,11 +1187,13 @@ void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm)
 		if (!r) {
 			list_del_init(&bo_va->bo_list);
 			radeon_bo_unreserve(bo_va->bo);
+			radeon_fence_unref(&bo_va->last_pt_update);
 			kfree(bo_va);
 		}
 	}
 	list_for_each_entry_safe(bo_va, tmp, &vm->freed, vm_status) {
 		radeon_bo_unref(&bo_va->bo);
+		radeon_fence_unref(&bo_va->last_pt_update);
 		kfree(bo_va);
 	}
@@ -1206,8 +1203,6 @@ void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm)
 	radeon_bo_unref(&vm->page_directory);

-	radeon_fence_unref(&vm->fence);
-
 	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
 		radeon_fence_unref(&vm->ids[i].flushed_updates);
 		radeon_fence_unref(&vm->ids[i].last_id_use);
...