Commit 998debbd authored by Philip Yang, committed by Alex Deucher

drm/amdgpu: Use vm status_lock to protect vm moved list

Use vm_status_lock to protect all vm_status state transitions to allow
them to happen without a reservation lock in unlocked page table
updates.
Signed-off-by: Philip Yang <Philip.Yang@amd.com>
Acked-by: Felix Kuehling <Felix.Kuehling@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent c1806d78
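
The pattern the message describes is straightforward: membership in the vm_status lists is guarded by a plain spinlock, so a BO can change state without holding the heavier reservation lock. Below is a minimal userspace C sketch of that idea using pthreads; the toy_* names and list helpers are illustrative stand-ins, not the amdgpu implementation.

#include <pthread.h>

/* Toy stand-ins for the kernel's list_head and spinlock_t. */
struct list_node {
	struct list_node *prev, *next;
};

static void list_init(struct list_node *head)
{
	head->prev = head->next = head;
}

static int list_empty(const struct list_node *head)
{
	return head->next == head;
}

/* Unlink @n from its current list and queue it on @head.
 * @n must already be linked somewhere, as vm_status nodes
 * always are in amdgpu. */
static void list_move_tail(struct list_node *n, struct list_node *head)
{
	n->prev->next = n->next;
	n->next->prev = n->prev;
	n->prev = head->prev;
	n->next = head;
	head->prev->next = n;
	head->prev = n;
}

struct toy_vm {
	pthread_spinlock_t status_lock;	/* protects the lists below */
	struct list_node moved;
	struct list_node invalidated;
};

static void toy_vm_init(struct toy_vm *vm)
{
	pthread_spin_init(&vm->status_lock, PTHREAD_PROCESS_PRIVATE);
	list_init(&vm->moved);
	list_init(&vm->invalidated);
}

/* Rough equivalent of the patched amdgpu_vm_bo_moved(): the state
 * transition needs only status_lock, not the reservation lock. */
static void toy_vm_bo_moved(struct toy_vm *vm, struct list_node *bo)
{
	pthread_spin_lock(&vm->status_lock);
	list_move_tail(bo, &vm->moved);
	pthread_spin_unlock(&vm->status_lock);
}
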
@@ -198,7 +198,9 @@ static void amdgpu_vm_bo_evicted(struct amdgpu_vm_bo_base *vm_bo)
  */
 static void amdgpu_vm_bo_moved(struct amdgpu_vm_bo_base *vm_bo)
 {
+	spin_lock(&vm_bo->vm->status_lock);
 	list_move(&vm_bo->vm_status, &vm_bo->vm->moved);
+	spin_unlock(&vm_bo->vm->status_lock);
 }
 
 /**
@@ -1287,19 +1289,24 @@ int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
 int amdgpu_vm_handle_moved(struct amdgpu_device *adev,
 			   struct amdgpu_vm *vm)
 {
-	struct amdgpu_bo_va *bo_va, *tmp;
+	struct amdgpu_bo_va *bo_va;
 	struct dma_resv *resv;
 	bool clear;
 	int r;
 
-	list_for_each_entry_safe(bo_va, tmp, &vm->moved, base.vm_status) {
+	spin_lock(&vm->status_lock);
+	while (!list_empty(&vm->moved)) {
+		bo_va = list_first_entry(&vm->moved, struct amdgpu_bo_va,
+					 base.vm_status);
+		spin_unlock(&vm->status_lock);
+
 		/* Per VM BOs never need to be cleared in the page tables */
 		r = amdgpu_vm_bo_update(adev, bo_va, false);
 		if (r)
 			return r;
+		spin_lock(&vm->status_lock);
 	}
 
-	spin_lock(&vm->status_lock);
 	while (!list_empty(&vm->invalidated)) {
 		bo_va = list_first_entry(&vm->invalidated, struct amdgpu_bo_va,
 					 base.vm_status);
@@ -1396,7 +1403,7 @@ static void amdgpu_vm_bo_insert_map(struct amdgpu_device *adev,
 	if (bo && bo->tbo.base.resv == vm->root.bo->tbo.base.resv &&
 	    !bo_va->base.moved) {
-		list_move(&bo_va->base.vm_status, &vm->moved);
+		amdgpu_vm_bo_moved(&bo_va->base);
 	}
 
 	trace_amdgpu_vm_bo_map(bo_va, mapping);
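
The second hunk is the standard shape for walking a spinlock-protected list when the per-entry work can sleep: take the first entry under the lock, drop the lock, do the work, then re-take the lock before testing list_empty() again. Continuing the toy types from the sketch above; update_one() is a placeholder for the sleeping amdgpu_vm_bo_update() call and is assumed to move the entry off the moved list on success, the way the real update does, so the loop terminates.

/* Sketch of the drop-and-retake loop in amdgpu_vm_handle_moved().
 * The sleeping work must not run under the spinlock, so the head
 * entry is fetched under the lock and the lock is dropped around
 * the update. */
static int toy_handle_moved(struct toy_vm *vm,
			    int (*update_one)(struct list_node *bo))
{
	int r;

	pthread_spin_lock(&vm->status_lock);
	while (!list_empty(&vm->moved)) {
		/* list_first_entry() equivalent */
		struct list_node *bo = vm->moved.next;

		pthread_spin_unlock(&vm->status_lock);

		/* May sleep; must unlink @bo from vm->moved on success. */
		r = update_one(bo);
		if (r)
			return r;

		pthread_spin_lock(&vm->status_lock);
	}
	pthread_spin_unlock(&vm->status_lock);

	return 0;
}

Note that the commit also deletes the spin_lock() that used to sit in front of the invalidated loop: the moved loop now exits with status_lock already held, so the second loop simply continues under the same lock.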