Commit 7f8a5290 authored by Christian König, committed by Alex Deucher

drm/amdgpu: rework vm_grab_id interface

This makes assigning VM IDs independent from the use of VM IDs.
Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Jammy Zhou <Jammy.Zhou@amd.com>
parent fc8fa5e4
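
At its core, the rework inverts the data flow of amdgpu_vm_grab_id(): instead of returning the fence the caller must wait on (NULL if none), the function now takes the caller's amdgpu_sync object, adds any required dependency to it internally, and returns an ordinary error code. A caller-side sketch of the two conventions, condensed from the hunks below (not literal kernel code):

	/* old: grab the id, then sync to the returned fence yourself */
	struct amdgpu_fence *vm_id_fence = amdgpu_vm_grab_id(ring, vm);
	r = amdgpu_sync_fence(adev, &sync, &vm_id_fence->base);

	/* new: pass the sync object in; any dependency on the id's previous
	 * owner is recorded there, and only an error code comes back */
	r = amdgpu_vm_grab_id(vm, ring, &sync);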
drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -2294,8 +2294,8 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm);
 struct amdgpu_bo_list_entry *amdgpu_vm_get_bos(struct amdgpu_device *adev,
                                                struct amdgpu_vm *vm,
                                                struct list_head *head);
-struct amdgpu_fence *amdgpu_vm_grab_id(struct amdgpu_ring *ring,
-                                       struct amdgpu_vm *vm);
+int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
+                      struct amdgpu_sync *sync);
 void amdgpu_vm_flush(struct amdgpu_ring *ring,
                      struct amdgpu_vm *vm,
                      struct amdgpu_fence *updates);
drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
@@ -165,9 +165,7 @@ int amdgpu_ib_schedule(struct amdgpu_device *adev, unsigned num_ibs,
         if (vm) {
                 /* grab a vm id if necessary */
-                struct amdgpu_fence *vm_id_fence = NULL;
-                vm_id_fence = amdgpu_vm_grab_id(ibs->ring, ibs->vm);
-                r = amdgpu_sync_fence(adev, &ibs->sync, &vm_id_fence->base);
+                r = amdgpu_vm_grab_id(ibs->vm, ibs->ring, &ibs->sync);
                 if (r) {
                         amdgpu_ring_unlock_undo(ring);
                         return r;
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -127,16 +127,16 @@ struct amdgpu_bo_list_entry *amdgpu_vm_get_bos(struct amdgpu_device *adev,
 /**
  * amdgpu_vm_grab_id - allocate the next free VMID
  *
- * @ring: ring we want to submit job to
  * @vm: vm to allocate id for
+ * @ring: ring we want to submit job to
+ * @sync: sync object where we add dependencies
  *
- * Allocate an id for the vm (cayman+).
- * Returns the fence we need to sync to (if any).
+ * Allocate an id for the vm, adding fences to the sync obj as necessary.
  *
- * Global and local mutex must be locked!
+ * Global mutex must be locked!
  */
-struct amdgpu_fence *amdgpu_vm_grab_id(struct amdgpu_ring *ring,
-                                       struct amdgpu_vm *vm)
+int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
+                      struct amdgpu_sync *sync)
 {
         struct amdgpu_fence *best[AMDGPU_MAX_RINGS] = {};
         struct amdgpu_vm_id *vm_id = &vm->ids[ring->idx];
@@ -148,7 +148,7 @@ struct amdgpu_fence *amdgpu_vm_grab_id(struct amdgpu_ring *ring,
         /* check if the id is still valid */
         if (vm_id->id && vm_id->last_id_use &&
             vm_id->last_id_use == adev->vm_manager.active[vm_id->id])
-                return NULL;
+                return 0;

         /* we definately need to flush */
         vm_id->pd_gpu_addr = ~0ll;
@@ -161,7 +161,7 @@ struct amdgpu_fence *amdgpu_vm_grab_id(struct amdgpu_ring *ring,
                         /* found a free one */
                         vm_id->id = i;
                         trace_amdgpu_vm_grab_id(i, ring->idx);
-                        return NULL;
+                        return 0;
                 }

                 if (amdgpu_fence_is_earlier(fence, best[fence->ring->idx])) {
@@ -172,15 +172,19 @@ struct amdgpu_fence *amdgpu_vm_grab_id(struct amdgpu_ring *ring,

         for (i = 0; i < 2; ++i) {
                 if (choices[i]) {
+                        struct amdgpu_fence *fence;
+
+                        fence = adev->vm_manager.active[choices[i]];
                         vm_id->id = choices[i];
+
                         trace_amdgpu_vm_grab_id(choices[i], ring->idx);
-                        return adev->vm_manager.active[choices[i]];
+                        return amdgpu_sync_fence(ring->adev, sync, &fence->base);
                 }
         }

         /* should never happen */
         BUG();
-        return NULL;
+        return -EINVAL;
 }

 /**