Commit 8f19cd78 authored by Christian König, committed by Alex Deucher

drm/amdgpu: remove last_entry_used from the VM code

Not needed any more.
Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Chunming Zhou <david1.zhou@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent e3a1b32a
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -329,9 +329,6 @@ static int amdgpu_vm_alloc_levels(struct amdgpu_device *adev,
 	    to >= amdgpu_vm_num_entries(adev, level))
 		return -EINVAL;
 
-	if (to > parent->last_entry_used)
-		parent->last_entry_used = to;
-
 	++level;
 	saddr = saddr & ((1 << shift) - 1);
 	eaddr = eaddr & ((1 << shift) - 1);
@@ -1184,16 +1181,19 @@ static int amdgpu_vm_update_pde(struct amdgpu_device *adev,
  *
  * Mark all PD level as invalid after an error.
  */
-static void amdgpu_vm_invalidate_level(struct amdgpu_vm *vm,
-				       struct amdgpu_vm_pt *parent)
+static void amdgpu_vm_invalidate_level(struct amdgpu_device *adev,
+				       struct amdgpu_vm *vm,
+				       struct amdgpu_vm_pt *parent,
+				       unsigned level)
 {
-	unsigned pt_idx;
+	unsigned pt_idx, num_entries;
 
 	/*
 	 * Recurse into the subdirectories. This recursion is harmless because
 	 * we only have a maximum of 5 layers.
 	 */
-	for (pt_idx = 0; pt_idx <= parent->last_entry_used; ++pt_idx) {
+	num_entries = amdgpu_vm_num_entries(adev, level);
+	for (pt_idx = 0; pt_idx < num_entries; ++pt_idx) {
 		struct amdgpu_vm_pt *entry = &parent->entries[pt_idx];
 
 		if (!entry->base.bo)
@@ -1204,7 +1204,7 @@ static void amdgpu_vm_invalidate_level(struct amdgpu_vm *vm,
 		if (list_empty(&entry->base.vm_status))
 			list_add(&entry->base.vm_status, &vm->relocated);
 		spin_unlock(&vm->status_lock);
-		amdgpu_vm_invalidate_level(vm, entry);
+		amdgpu_vm_invalidate_level(adev, vm, entry, level + 1);
 	}
 }
@@ -1246,7 +1246,8 @@ int amdgpu_vm_update_directories(struct amdgpu_device *adev,
 			r = amdgpu_vm_update_pde(adev, vm, pt, entry);
 			if (r) {
-				amdgpu_vm_invalidate_level(vm, &vm->root);
+				amdgpu_vm_invalidate_level(adev, vm,
+							   &vm->root, 0);
 				return r;
 			}
 			spin_lock(&vm->status_lock);
@@ -1649,7 +1650,7 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
 
 error_free:
 	amdgpu_job_free(job);
-	amdgpu_vm_invalidate_level(vm, &vm->root);
+	amdgpu_vm_invalidate_level(adev, vm, &vm->root, 0);
 	return r;
 }
@@ -2713,26 +2714,31 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 /**
  * amdgpu_vm_free_levels - free PD/PT levels
  *
- * @level: PD/PT starting level to free
+ * @adev: amdgpu device structure
+ * @parent: PD/PT starting level to free
+ * @level: level of parent structure
  *
  * Free the page directory or page table level and all sub levels.
  */
-static void amdgpu_vm_free_levels(struct amdgpu_vm_pt *level)
+static void amdgpu_vm_free_levels(struct amdgpu_device *adev,
+				  struct amdgpu_vm_pt *parent,
+				  unsigned level)
 {
-	unsigned i;
+	unsigned i, num_entries = amdgpu_vm_num_entries(adev, level);
 
-	if (level->base.bo) {
-		list_del(&level->base.bo_list);
-		list_del(&level->base.vm_status);
-		amdgpu_bo_unref(&level->base.bo->shadow);
-		amdgpu_bo_unref(&level->base.bo);
+	if (parent->base.bo) {
+		list_del(&parent->base.bo_list);
+		list_del(&parent->base.vm_status);
+		amdgpu_bo_unref(&parent->base.bo->shadow);
+		amdgpu_bo_unref(&parent->base.bo);
 	}
 
-	if (level->entries)
-		for (i = 0; i <= level->last_entry_used; i++)
-			amdgpu_vm_free_levels(&level->entries[i]);
+	if (parent->entries)
+		for (i = 0; i < num_entries; i++)
+			amdgpu_vm_free_levels(adev, &parent->entries[i],
+					      level + 1);
 
-	kvfree(level->entries);
+	kvfree(parent->entries);
 }
 
 /**
@@ -2790,7 +2796,7 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
 	if (r) {
 		dev_err(adev->dev, "Leaking page tables because BO reservation failed\n");
 	} else {
-		amdgpu_vm_free_levels(&vm->root);
+		amdgpu_vm_free_levels(adev, &vm->root, 0);
 		amdgpu_bo_unreserve(root);
 	}
 	amdgpu_bo_unref(&root);
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
@@ -142,7 +142,6 @@ struct amdgpu_vm_pt {
 
 	/* array of page tables, one for each directory entry */
 	struct amdgpu_vm_pt	*entries;
-	unsigned		last_entry_used;
 };
 
 #define AMDGPU_VM_FAULT(pasid, addr) (((u64)(pasid) << 48) | (addr))
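The loops in this patch now take their bound from amdgpu_vm_num_entries(adev, level) rather than a per-directory last_entry_used field, since each level's entries array is sized for the full directory. A minimal standalone sketch of that loop shape, where BLOCK_SIZE, NUM_LEVELS, num_entries(), and walk_level() are illustrative assumptions and not the actual amdgpu implementation:

#include <stdio.h>

#define BLOCK_SIZE 9	/* assumed log2 of entries per directory level */
#define NUM_LEVELS 4	/* assumed page-table depth, root = level 0 */

static unsigned long visited;

/*
 * Entry count for a given level. With a fixed layout this is fully
 * computable up front, so callers can loop over every entry instead of
 * stopping at a separately tracked "last used" index.
 */
static unsigned num_entries(unsigned level)
{
	(void)level;	/* same count at every level in this sketch */
	return 1u << BLOCK_SIZE;
}

/* Walk the levels with the same loop shape as the patched functions. */
static void walk_level(unsigned level)
{
	unsigned i, n;

	if (level >= NUM_LEVELS)
		return;

	n = num_entries(level);
	for (i = 0; i < n; ++i)
		visited++;	/* stand-in for per-entry work */

	walk_level(level + 1);
}

int main(void)
{
	walk_level(0);
	printf("visited %lu entries across %d levels\n", visited, NUM_LEVELS);
	return 0;
}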