Commit 1c16c0a7 authored by Christian König, committed by Alex Deucher

drm/amdgpu: keep the owner for VMIDs

We don't need the last VM use any more, keep the owner directly.
Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Chunming Zhou <davdi1.zhou@amd.com>
parent ea89f8c9
...@@ -925,8 +925,6 @@ struct amdgpu_vm_id { ...@@ -925,8 +925,6 @@ struct amdgpu_vm_id {
uint64_t pd_gpu_addr; uint64_t pd_gpu_addr;
/* last flushed PD/PT update */ /* last flushed PD/PT update */
struct fence *flushed_updates; struct fence *flushed_updates;
/* last use of vmid */
struct fence *last_id_use;
}; };
struct amdgpu_vm { struct amdgpu_vm {
...@@ -959,7 +957,11 @@ struct amdgpu_vm { ...@@ -959,7 +957,11 @@ struct amdgpu_vm {
}; };
struct amdgpu_vm_manager { struct amdgpu_vm_manager {
struct fence *active[AMDGPU_NUM_VM]; struct {
struct fence *active;
atomic_long_t owner;
} ids[AMDGPU_NUM_VM];
uint32_t max_pfn; uint32_t max_pfn;
/* number of VMIDs */ /* number of VMIDs */
unsigned nvm; unsigned nvm;
......
...@@ -143,10 +143,15 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring, ...@@ -143,10 +143,15 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
unsigned i; unsigned i;
/* check if the id is still valid */ /* check if the id is still valid */
if (vm_id->id && vm_id->last_id_use && if (vm_id->id) {
vm_id->last_id_use == adev->vm_manager.active[vm_id->id]) { unsigned id = vm_id->id;
trace_amdgpu_vm_grab_id(vm_id->id, ring->idx); long owner;
return 0;
owner = atomic_long_read(&adev->vm_manager.ids[id].owner);
if (owner == (long)vm) {
trace_amdgpu_vm_grab_id(vm_id->id, ring->idx);
return 0;
}
} }
/* we definately need to flush */ /* we definately need to flush */
...@@ -154,7 +159,7 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring, ...@@ -154,7 +159,7 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
/* skip over VMID 0, since it is the system VM */ /* skip over VMID 0, since it is the system VM */
for (i = 1; i < adev->vm_manager.nvm; ++i) { for (i = 1; i < adev->vm_manager.nvm; ++i) {
struct fence *fence = adev->vm_manager.active[i]; struct fence *fence = adev->vm_manager.ids[i].active;
struct amdgpu_ring *fring; struct amdgpu_ring *fring;
if (fence == NULL) { if (fence == NULL) {
...@@ -176,7 +181,7 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring, ...@@ -176,7 +181,7 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
if (choices[i]) { if (choices[i]) {
struct fence *fence; struct fence *fence;
fence = adev->vm_manager.active[choices[i]]; fence = adev->vm_manager.ids[choices[i]].active;
vm_id->id = choices[i]; vm_id->id = choices[i];
trace_amdgpu_vm_grab_id(choices[i], ring->idx); trace_amdgpu_vm_grab_id(choices[i], ring->idx);
...@@ -246,11 +251,9 @@ void amdgpu_vm_fence(struct amdgpu_device *adev, ...@@ -246,11 +251,9 @@ void amdgpu_vm_fence(struct amdgpu_device *adev,
struct amdgpu_ring *ring = amdgpu_ring_from_fence(fence); struct amdgpu_ring *ring = amdgpu_ring_from_fence(fence);
unsigned vm_id = vm->ids[ring->idx].id; unsigned vm_id = vm->ids[ring->idx].id;
fence_put(adev->vm_manager.active[vm_id]); fence_put(adev->vm_manager.ids[vm_id].active);
adev->vm_manager.active[vm_id] = fence_get(fence); adev->vm_manager.ids[vm_id].active = fence_get(fence);
atomic_long_set(&adev->vm_manager.ids[vm_id].owner, (long)vm);
fence_put(vm->ids[ring->idx].last_id_use);
vm->ids[ring->idx].last_id_use = fence_get(fence);
} }
/** /**
...@@ -1238,7 +1241,6 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm) ...@@ -1238,7 +1241,6 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
for (i = 0; i < AMDGPU_MAX_RINGS; ++i) { for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
vm->ids[i].id = 0; vm->ids[i].id = 0;
vm->ids[i].flushed_updates = NULL; vm->ids[i].flushed_updates = NULL;
vm->ids[i].last_id_use = NULL;
} }
mutex_init(&vm->mutex); mutex_init(&vm->mutex);
vm->va = RB_ROOT; vm->va = RB_ROOT;
...@@ -1312,8 +1314,11 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm) ...@@ -1312,8 +1314,11 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
fence_put(vm->page_directory_fence); fence_put(vm->page_directory_fence);
for (i = 0; i < AMDGPU_MAX_RINGS; ++i) { for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
unsigned id = vm->ids[i].id;
atomic_long_cmpxchg(&adev->vm_manager.ids[id].owner,
(long)vm, 0);
fence_put(vm->ids[i].flushed_updates); fence_put(vm->ids[i].flushed_updates);
fence_put(vm->ids[i].last_id_use);
} }
mutex_destroy(&vm->mutex); mutex_destroy(&vm->mutex);
...@@ -1331,5 +1336,5 @@ void amdgpu_vm_manager_fini(struct amdgpu_device *adev) ...@@ -1331,5 +1336,5 @@ void amdgpu_vm_manager_fini(struct amdgpu_device *adev)
unsigned i; unsigned i;
for (i = 0; i < AMDGPU_NUM_VM; ++i) for (i = 0; i < AMDGPU_NUM_VM; ++i)
fence_put(adev->vm_manager.active[i]); fence_put(adev->vm_manager.ids[i].active);
} }
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment