Commit 57210c19 authored by xinhui pan's avatar xinhui pan Committed by Alex Deucher

drm_amdgpu: Add job fence to resv conditionally

Job fence on page table should be a shared one, so add it to the root
page table bo resv.
The last_delayed field is not needed anymore, so remove it.

Cc: Christian König <christian.koenig@amd.com>
Cc: Alex Deucher <alexander.deucher@amd.com>
Cc: Felix Kuehling <Felix.Kuehling@amd.com>
Suggested-by: Christian König <christian.koenig@amd.com>
Signed-off-by: xinhui pan <xinhui.pan@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent 79cb2719
...@@ -1608,9 +1608,6 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev, ...@@ -1608,9 +1608,6 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
if (!dma_fence_is_signaled(vm->last_direct)) if (!dma_fence_is_signaled(vm->last_direct))
amdgpu_bo_fence(root, vm->last_direct, true); amdgpu_bo_fence(root, vm->last_direct, true);
if (!dma_fence_is_signaled(vm->last_delayed))
amdgpu_bo_fence(root, vm->last_delayed, true);
} }
r = vm->update_funcs->prepare(&params, resv, sync_mode); r = vm->update_funcs->prepare(&params, resv, sync_mode);
...@@ -2588,8 +2585,7 @@ bool amdgpu_vm_evictable(struct amdgpu_bo *bo) ...@@ -2588,8 +2585,7 @@ bool amdgpu_vm_evictable(struct amdgpu_bo *bo)
return false; return false;
/* Don't evict VM page tables while they are updated */ /* Don't evict VM page tables while they are updated */
if (!dma_fence_is_signaled(bo_base->vm->last_direct) || if (!dma_fence_is_signaled(bo_base->vm->last_direct)) {
!dma_fence_is_signaled(bo_base->vm->last_delayed)) {
amdgpu_vm_eviction_unlock(bo_base->vm); amdgpu_vm_eviction_unlock(bo_base->vm);
return false; return false;
} }
...@@ -2766,11 +2762,7 @@ long amdgpu_vm_wait_idle(struct amdgpu_vm *vm, long timeout) ...@@ -2766,11 +2762,7 @@ long amdgpu_vm_wait_idle(struct amdgpu_vm *vm, long timeout)
if (timeout <= 0) if (timeout <= 0)
return timeout; return timeout;
timeout = dma_fence_wait_timeout(vm->last_direct, true, timeout); return dma_fence_wait_timeout(vm->last_direct, true, timeout);
if (timeout <= 0)
return timeout;
return dma_fence_wait_timeout(vm->last_delayed, true, timeout);
} }
/** /**
...@@ -2843,7 +2835,6 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, ...@@ -2843,7 +2835,6 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
vm->update_funcs = &amdgpu_vm_sdma_funcs; vm->update_funcs = &amdgpu_vm_sdma_funcs;
vm->last_update = NULL; vm->last_update = NULL;
vm->last_direct = dma_fence_get_stub(); vm->last_direct = dma_fence_get_stub();
vm->last_delayed = dma_fence_get_stub();
mutex_init(&vm->eviction_lock); mutex_init(&vm->eviction_lock);
vm->evicting = false; vm->evicting = false;
...@@ -2898,7 +2889,6 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, ...@@ -2898,7 +2889,6 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
error_free_delayed: error_free_delayed:
dma_fence_put(vm->last_direct); dma_fence_put(vm->last_direct);
dma_fence_put(vm->last_delayed);
drm_sched_entity_destroy(&vm->delayed); drm_sched_entity_destroy(&vm->delayed);
error_free_direct: error_free_direct:
...@@ -3101,8 +3091,6 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm) ...@@ -3101,8 +3091,6 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
dma_fence_wait(vm->last_direct, false); dma_fence_wait(vm->last_direct, false);
dma_fence_put(vm->last_direct); dma_fence_put(vm->last_direct);
dma_fence_wait(vm->last_delayed, false);
dma_fence_put(vm->last_delayed);
list_for_each_entry_safe(mapping, tmp, &vm->freed, list) { list_for_each_entry_safe(mapping, tmp, &vm->freed, list) {
if (mapping->flags & AMDGPU_PTE_PRT && prt_fini_needed) { if (mapping->flags & AMDGPU_PTE_PRT && prt_fini_needed) {
......
...@@ -276,7 +276,6 @@ struct amdgpu_vm { ...@@ -276,7 +276,6 @@ struct amdgpu_vm {
/* Last submission to the scheduler entities */ /* Last submission to the scheduler entities */
struct dma_fence *last_direct; struct dma_fence *last_direct;
struct dma_fence *last_delayed;
unsigned int pasid; unsigned int pasid;
/* dedicated to vm */ /* dedicated to vm */
......
...@@ -104,12 +104,13 @@ static int amdgpu_vm_sdma_commit(struct amdgpu_vm_update_params *p, ...@@ -104,12 +104,13 @@ static int amdgpu_vm_sdma_commit(struct amdgpu_vm_update_params *p,
if (r) if (r)
goto error; goto error;
tmp = dma_fence_get(f); if (p->direct) {
if (p->direct) tmp = dma_fence_get(f);
swap(p->vm->last_direct, tmp); swap(p->vm->last_direct, tmp);
else dma_fence_put(tmp);
swap(p->vm->last_delayed, tmp); } else {
dma_fence_put(tmp); dma_resv_add_shared_fence(p->vm->root.base.bo->tbo.base.resv, f);
}
if (fence && !p->direct) if (fence && !p->direct)
swap(*fence, f); swap(*fence, f);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment