Commit 26e20235 authored by Tvrtko Ursulin, committed by Alex Deucher

drm/amdgpu: Add amdgpu_bo_is_vm_bo helper

Help code readability by replacing a bunch of:

bo->tbo.base.resv == vm->root.bo->tbo.base.resv

With:

amdgpu_vm_is_bo_always_valid(vm, bo)

No functional changes.
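
For reference, the check the helper centralizes (added below in
amdgpu_vm.c) also folds in the NULL test, so call sites which previously
guarded with "bo &&" can simply drop it:

    bool amdgpu_vm_is_bo_always_valid(struct amdgpu_vm *vm, struct amdgpu_bo *bo)
    {
            return bo && bo->tbo.base.resv == vm->root.bo->tbo.base.resv;
    }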

v2:
 * Rename helper and move to amdgpu_vm. (Christian)

v3:
 * Use Christian's kerneldoc.

v4:
 * Fixed logic inversion in amdgpu_vm_bo_get_memory.
Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin@igalia.com>
Cc: Christian König <christian.koenig@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent e060c7ba
drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c

@@ -174,7 +174,7 @@ static int amdgpu_gem_object_open(struct drm_gem_object *obj,
 		return -EPERM;
 
 	if (abo->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID &&
-	    abo->tbo.base.resv != vm->root.bo->tbo.base.resv)
+	    !amdgpu_vm_is_bo_always_valid(vm, abo))
 		return -EPERM;
 
 	r = amdgpu_bo_reserve(abo, false);
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c

@@ -333,7 +333,7 @@ void amdgpu_vm_bo_base_init(struct amdgpu_vm_bo_base *base,
 	base->next = bo->vm_bo;
 	bo->vm_bo = base;
 
-	if (bo->tbo.base.resv != vm->root.bo->tbo.base.resv)
+	if (!amdgpu_vm_is_bo_always_valid(vm, bo))
 		return;
 
 	dma_resv_assert_held(vm->root.bo->tbo.base.resv);
@@ -1101,13 +1101,13 @@ static void amdgpu_vm_bo_get_memory(struct amdgpu_bo_va *bo_va,
 	 * For now ignore BOs which are currently locked and potentially
 	 * changing their location.
 	 */
-	if (bo->tbo.base.resv != vm->root.bo->tbo.base.resv &&
+	if (!amdgpu_vm_is_bo_always_valid(vm, bo) &&
 	    !dma_resv_trylock(bo->tbo.base.resv))
 		return;
 
 	amdgpu_bo_get_memory(bo, stats);
-	if (bo->tbo.base.resv != vm->root.bo->tbo.base.resv)
+	if (!amdgpu_vm_is_bo_always_valid(vm, bo))
 		dma_resv_unlock(bo->tbo.base.resv);
 }
 
 void amdgpu_vm_get_memory(struct amdgpu_vm *vm,
@@ -1203,8 +1203,7 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va,
 		uncached = false;
 	}
 
-	if (clear || (bo && bo->tbo.base.resv ==
-		      vm->root.bo->tbo.base.resv))
+	if (clear || amdgpu_vm_is_bo_always_valid(vm, bo))
 		last_update = &vm->last_update;
 	else
 		last_update = &bo_va->last_pt_update;
@@ -1246,7 +1245,7 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va,
 	 * the evicted list so that it gets validated again on the
 	 * next command submission.
 	 */
-	if (bo && bo->tbo.base.resv == vm->root.bo->tbo.base.resv) {
+	if (amdgpu_vm_is_bo_always_valid(vm, bo)) {
 		uint32_t mem_type = bo->tbo.resource->mem_type;
 
 		if (!(bo->preferred_domains &
@@ -1640,10 +1639,9 @@ static void amdgpu_vm_bo_insert_map(struct amdgpu_device *adev,
 	if (mapping->flags & AMDGPU_PTE_PRT_FLAG(adev))
 		amdgpu_vm_prt_get(adev);
 
-	if (bo && bo->tbo.base.resv == vm->root.bo->tbo.base.resv &&
-	    !bo_va->base.moved) {
+	if (amdgpu_vm_is_bo_always_valid(vm, bo) && !bo_va->base.moved)
 		amdgpu_vm_bo_moved(&bo_va->base);
-	}
 
 	trace_amdgpu_vm_bo_map(bo_va, mapping);
 }
@@ -1942,7 +1940,7 @@ int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
 		if (before->flags & AMDGPU_PTE_PRT_FLAG(adev))
 			amdgpu_vm_prt_get(adev);
 
-		if (bo && bo->tbo.base.resv == vm->root.bo->tbo.base.resv &&
+		if (amdgpu_vm_is_bo_always_valid(vm, bo) &&
 		    !before->bo_va->base.moved)
 			amdgpu_vm_bo_moved(&before->bo_va->base);
 	} else {
@@ -1957,7 +1955,7 @@ int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
 		if (after->flags & AMDGPU_PTE_PRT_FLAG(adev))
 			amdgpu_vm_prt_get(adev);
 
-		if (bo && bo->tbo.base.resv == vm->root.bo->tbo.base.resv &&
+		if (amdgpu_vm_is_bo_always_valid(vm, bo) &&
 		    !after->bo_va->base.moved)
 			amdgpu_vm_bo_moved(&after->bo_va->base);
 	} else {
@@ -2037,7 +2035,7 @@ void amdgpu_vm_bo_del(struct amdgpu_device *adev,
 	if (bo) {
 		dma_resv_assert_held(bo->tbo.base.resv);
 
-		if (bo->tbo.base.resv == vm->root.bo->tbo.base.resv)
+		if (amdgpu_vm_is_bo_always_valid(vm, bo))
 			ttm_bo_set_bulk_move(&bo->tbo, NULL);
 
 		for (base = &bo_va->base.bo->vm_bo; *base;
@@ -2131,7 +2129,7 @@ void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
 	for (bo_base = bo->vm_bo; bo_base; bo_base = bo_base->next) {
 		struct amdgpu_vm *vm = bo_base->vm;
 
-		if (evicted && bo->tbo.base.resv == vm->root.bo->tbo.base.resv) {
+		if (evicted && amdgpu_vm_is_bo_always_valid(vm, bo)) {
 			amdgpu_vm_bo_evicted(bo_base);
 			continue;
 		}
@@ -2142,7 +2140,7 @@ void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
 
 		if (bo->tbo.type == ttm_bo_type_kernel)
 			amdgpu_vm_bo_relocated(bo_base);
-		else if (bo->tbo.base.resv == vm->root.bo->tbo.base.resv)
+		else if (amdgpu_vm_is_bo_always_valid(vm, bo))
 			amdgpu_vm_bo_moved(bo_base);
 		else
 			amdgpu_vm_bo_invalidated(bo_base);
@@ -3006,3 +3004,16 @@ void amdgpu_vm_update_fault_cache(struct amdgpu_device *adev,
 
 	xa_unlock_irqrestore(&adev->vm_manager.pasids, flags);
 }
+
+/**
+ * amdgpu_vm_is_bo_always_valid - check if the BO is VM always valid
+ *
+ * @vm: VM to test against.
+ * @bo: BO to be tested.
+ *
+ * Returns true if the BO shares the dma_resv object with the root PD and is
+ * always guaranteed to be valid inside the VM.
+ */
+bool amdgpu_vm_is_bo_always_valid(struct amdgpu_vm *vm, struct amdgpu_bo *bo)
+{
+	return bo && bo->tbo.base.resv == vm->root.bo->tbo.base.resv;
+}
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h

@@ -580,6 +580,8 @@ void amdgpu_debugfs_vm_bo_info(struct amdgpu_vm *vm, struct seq_file *m);
 
 int amdgpu_vm_pt_map_tables(struct amdgpu_device *adev, struct amdgpu_vm *vm);
 
+bool amdgpu_vm_is_bo_always_valid(struct amdgpu_vm *vm, struct amdgpu_bo *bo);
+
 /**
  * amdgpu_vm_tlb_seq - return tlb flush sequence number
  * @vm: the amdgpu_vm structure to query