Commit 914b4dce authored by Christian König, committed by Alex Deucher

drm/amdgpu: stop using a bo list entry for the VM PTs

Saves us a bit of memory.
Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent f7da30d9
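
The saving comes from shrinking each per-page-table slot in struct amdgpu_vm_pt from an embedded struct amdgpu_bo_list_entry down to a single buffer-object pointer. Below is a minimal, self-contained C sketch of the before/after layouts. The amdgpu_bo_list_entry fields are taken from the ones initialized in the -1463 hunk of this diff; the simplified ttm_validate_buffer shape is an assumption made for illustration, not the real TTM definition.

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    struct amdgpu_bo; /* opaque here; the real struct lives in the driver */

    /* Assumed, simplified stand-in for the TTM validation entry. */
    struct ttm_validate_buffer {
            void *prev, *next;       /* stand-in for the embedded list head */
            struct amdgpu_bo *bo;    /* stand-in for the TTM BO pointer */
            bool shared;
    };

    /* Before: every page-table slot embedded a whole list entry
     * (fields as initialized in amdgpu_vm_bo_map prior to this patch). */
    struct amdgpu_bo_list_entry {
            struct amdgpu_bo *robj;
            struct ttm_validate_buffer tv;
            unsigned priority;
            void *user_pages;
    };

    struct amdgpu_vm_pt_old {
            struct amdgpu_bo_list_entry entry;
            uint64_t addr;
    };

    /* After: only the BO pointer is kept per slot. */
    struct amdgpu_vm_pt_new {
            struct amdgpu_bo *bo;
            uint64_t addr;
    };

    int main(void)
    {
            /* The delta is paid once per page-directory entry, so it
             * adds up for address spaces with many page tables. */
            printf("old slot: %zu bytes, new slot: %zu bytes\n",
                   sizeof(struct amdgpu_vm_pt_old),
                   sizeof(struct amdgpu_vm_pt_new));
            return 0;
    }

With these assumed layouts, a 64-bit build comes out around 64 bytes per slot before versus 16 after; the exact numbers depend on the real kernel structs, but the per-slot saving is multiplied across every page-directory entry of every VM.
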
@@ -820,8 +820,8 @@ struct amdgpu_ring {
 #define AMDGPU_VM_FAULT_STOP_ALWAYS	2
 
 struct amdgpu_vm_pt {
-	struct amdgpu_bo_list_entry	entry;
+	struct amdgpu_bo	*bo;
 	uint64_t			addr;
 };
 
 struct amdgpu_vm {
@@ -142,12 +142,12 @@ int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 
 	/* add the vm page table to the list */
 	for (i = 0; i <= vm->max_pde_used; ++i) {
-		struct amdgpu_bo_list_entry *entry = &vm->page_tables[i].entry;
+		struct amdgpu_bo *bo = vm->page_tables[i].bo;
 
-		if (!entry->robj)
+		if (!bo)
 			continue;
 
-		r = validate(param, entry->robj);
+		r = validate(param, bo);
 		if (r)
 			return r;
 	}
@@ -171,12 +171,12 @@ void amdgpu_vm_move_pt_bos_in_lru(struct amdgpu_device *adev,
 
 	spin_lock(&glob->lru_lock);
 	for (i = 0; i <= vm->max_pde_used; ++i) {
-		struct amdgpu_bo_list_entry *entry = &vm->page_tables[i].entry;
+		struct amdgpu_bo *bo = vm->page_tables[i].bo;
 
-		if (!entry->robj)
+		if (!bo)
 			continue;
 
-		ttm_bo_move_to_lru_tail(&entry->robj->tbo);
+		ttm_bo_move_to_lru_tail(&bo->tbo);
 	}
 	spin_unlock(&glob->lru_lock);
 }
@@ -674,7 +674,7 @@ int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
 
 	/* walk over the address space and update the page directory */
 	for (pt_idx = 0; pt_idx <= vm->max_pde_used; ++pt_idx) {
-		struct amdgpu_bo *bo = vm->page_tables[pt_idx].entry.robj;
+		struct amdgpu_bo *bo = vm->page_tables[pt_idx].bo;
 		uint64_t pde, pt;
 
 		if (bo == NULL)
@@ -790,11 +790,11 @@ static void amdgpu_vm_update_ptes(struct amdgpu_pte_update_params *params,
 	/* initialize the variables */
 	addr = start;
 	pt_idx = addr >> amdgpu_vm_block_size;
-	pt = vm->page_tables[pt_idx].entry.robj;
+	pt = vm->page_tables[pt_idx].bo;
 	if (params->shadow) {
 		if (!pt->shadow)
 			return;
-		pt = vm->page_tables[pt_idx].entry.robj->shadow;
+		pt = pt->shadow;
 	}
 
 	if ((addr & ~mask) == (end & ~mask))
 		nptes = end - addr;
@@ -813,11 +813,11 @@ static void amdgpu_vm_update_ptes(struct amdgpu_pte_update_params *params,
 	/* walk over the address space and update the page tables */
 	while (addr < end) {
 		pt_idx = addr >> amdgpu_vm_block_size;
-		pt = vm->page_tables[pt_idx].entry.robj;
+		pt = vm->page_tables[pt_idx].bo;
 		if (params->shadow) {
 			if (!pt->shadow)
 				return;
-			pt = vm->page_tables[pt_idx].entry.robj->shadow;
+			pt = pt->shadow;
 		}
 
 		if ((addr & ~mask) == (end & ~mask))
@@ -1425,11 +1425,9 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
 	/* walk over the address space and allocate the page tables */
 	for (pt_idx = saddr; pt_idx <= eaddr; ++pt_idx) {
 		struct reservation_object *resv = vm->page_directory->tbo.resv;
-		struct amdgpu_bo_list_entry *entry;
 		struct amdgpu_bo *pt;
 
-		entry = &vm->page_tables[pt_idx].entry;
-		if (entry->robj)
+		if (vm->page_tables[pt_idx].bo)
 			continue;
 
 		r = amdgpu_bo_create(adev, AMDGPU_VM_PTE_COUNT * 8,
@@ -1463,11 +1461,7 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
 			}
 		}
 
-		entry->robj = pt;
-		entry->priority = 0;
-		entry->tv.bo = &entry->robj->tbo;
-		entry->tv.shared = true;
-		entry->user_pages = NULL;
+		vm->page_tables[pt_idx].bo = pt;
 		vm->page_tables[pt_idx].addr = 0;
 	}
@@ -1719,7 +1713,7 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
 	}
 
 	for (i = 0; i < amdgpu_vm_num_pdes(adev); i++) {
-		struct amdgpu_bo *pt = vm->page_tables[i].entry.robj;
+		struct amdgpu_bo *pt = vm->page_tables[i].bo;
 
 		if (!pt)
 			continue;