Commit cb7b6ec2 authored by Christian König, committed by Alex Deucher

drm/amdgpu: add bo_va cleared flag again v2

We changed this to use an extra list a while back, but for the next
series I need a separate flag again.

v2: reorder to avoid unlocked list access
Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Chunming Zhou <david1.zhou@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent 3d7d4d3a
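The idea behind the change, as a minimal standalone sketch (simplified, assumed names; not the kernel code): instead of tracking "cleared" bo_vas on a vm-wide list whose membership test needs vm->status_lock, a per-bo_va flag records whether the mappings were last written as cleared page table entries, so deciding whether the valid mappings must be re-written reduces to comparing the flag against the requested state.

#include <stdbool.h>
#include <stdio.h>

struct bo_va {
	bool cleared;	/* were the mappings last written as cleared PTEs? */
};

/* The flag is covered by the BO reservation the caller already holds,
 * so no extra lock is needed for this check. */
static bool needs_rewrite(const struct bo_va *bo_va, bool clear)
{
	return bo_va->cleared != clear;
}

int main(void)
{
	struct bo_va v = { .cleared = true };

	printf("rewrite: %d\n", needs_rewrite(&v, false));	/* 1: state differs */
	v.cleared = false;
	printf("rewrite: %d\n", needs_rewrite(&v, false));	/* 0: nothing to do */
	return 0;
}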
@@ -55,6 +55,9 @@ struct amdgpu_bo_va {
 	/* mappings for this bo_va */
 	struct list_head		invalids;
 	struct list_head		valids;
+
+	/* If the mappings are cleared or filled */
+	bool				cleared;
 };
 
 struct amdgpu_bo {
...
@@ -1792,11 +1792,8 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
 		bo_va->base.moved = false;
 		list_splice_init(&bo_va->valids, &bo_va->invalids);
 
-	} else {
-		spin_lock(&vm->status_lock);
-		if (!list_empty(&bo_va->base.vm_status))
-			list_splice_init(&bo_va->valids, &bo_va->invalids);
-		spin_unlock(&vm->status_lock);
+	} else if (bo_va->cleared != clear) {
+		list_splice_init(&bo_va->valids, &bo_va->invalids);
 	}
 
 	list_for_each_entry(mapping, &bo_va->invalids, list) {
@@ -1807,25 +1804,22 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
 			return r;
 	}
 
-	if (trace_amdgpu_vm_bo_mapping_enabled()) {
-		list_for_each_entry(mapping, &bo_va->valids, list)
-			trace_amdgpu_vm_bo_mapping(mapping);
-
-		list_for_each_entry(mapping, &bo_va->invalids, list)
-			trace_amdgpu_vm_bo_mapping(mapping);
+	if (vm->use_cpu_for_update) {
+		/* Flush HDP */
+		mb();
+		amdgpu_gart_flush_gpu_tlb(adev, 0);
 	}
 
 	spin_lock(&vm->status_lock);
-	list_splice_init(&bo_va->invalids, &bo_va->valids);
 	list_del_init(&bo_va->base.vm_status);
-	if (clear)
-		list_add(&bo_va->base.vm_status, &vm->cleared);
 	spin_unlock(&vm->status_lock);
 
-	if (vm->use_cpu_for_update) {
-		/* Flush HDP */
-		mb();
-		amdgpu_gart_flush_gpu_tlb(adev, 0);
+	list_splice_init(&bo_va->invalids, &bo_va->valids);
+	bo_va->cleared = clear;
+
+	if (trace_amdgpu_vm_bo_mapping_enabled()) {
+		list_for_each_entry(mapping, &bo_va->valids, list)
+			trace_amdgpu_vm_bo_mapping(mapping);
 	}
 
 	return 0;
@@ -2427,9 +2421,7 @@ void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
 	list_for_each_entry(bo_base, &bo->va, bo_list) {
 		bo_base->moved = true;
 		spin_lock(&bo_base->vm->status_lock);
-		if (list_empty(&bo_base->vm_status))
-			list_add(&bo_base->vm_status,
-				 &bo_base->vm->moved);
+		list_move(&bo_base->vm_status, &bo_base->vm->moved);
 		spin_unlock(&bo_base->vm->status_lock);
 	}
 }
@@ -2516,7 +2508,6 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 		vm->reserved_vmid[i] = NULL;
 	spin_lock_init(&vm->status_lock);
 	INIT_LIST_HEAD(&vm->moved);
-	INIT_LIST_HEAD(&vm->cleared);
 	INIT_LIST_HEAD(&vm->freed);
 
 	/* create scheduler entity for page table updates */
...
@@ -129,9 +129,6 @@ struct amdgpu_vm {
 	/* BOs moved, but not yet updated in the PT */
 	struct list_head	moved;
 
-	/* BOs cleared in the PT because of a move */
-	struct list_head	cleared;
-
 	/* BO mappings freed, but not yet updated in the PT */
 	struct list_head	freed;
 
...
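On the v2 note in the commit message: the reorder keeps every touch of the vm-wide vm_status list inside vm->status_lock, while the bo_va-local list splice and the new flag write happen afterwards under the BO reservation the caller already holds. A contrived but runnable sketch of that ordering (assumed names; a counter stands in for the list):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct vm {
	pthread_mutex_t status_lock;
	int num_dirty;		/* stand-in for the vm_status list */
};

struct bo_va {
	bool cleared;		/* reservation-protected, like valids/invalids */
};

static void bo_update(struct vm *vm, struct bo_va *bo_va, bool clear)
{
	/* 1) vm-wide bookkeeping strictly under status_lock */
	pthread_mutex_lock(&vm->status_lock);
	vm->num_dirty--;	/* ~ list_del_init(&bo_va->base.vm_status) */
	pthread_mutex_unlock(&vm->status_lock);

	/* 2) bo_va-local state after dropping the lock; the caller's
	 * reservation serializes this, so there is no unlocked access
	 * to a shared list */
	bo_va->cleared = clear;	/* ~ list_splice_init() plus flag write */
}

int main(void)
{
	struct vm vm = { PTHREAD_MUTEX_INITIALIZER, 1 };
	struct bo_va bo_va = { .cleared = false };

	bo_update(&vm, &bo_va, true);
	printf("dirty=%d cleared=%d\n", vm.num_dirty, (int)bo_va.cleared);
	return 0;
}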