Commit 1baa439f authored by Chunming Zhou, committed by Alex Deucher

drm/amdgpu: allocate shadow for pd/pt bo V2

The pd/pt shadow BOs will be used to back up the page tables; when a GPU
reset happens, we can restore the page tables from them.
V2:
Free the shadow BOs.
Signed-off-by: Chunming Zhou <David1.Zhou@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent 14fd833e
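
For context: OR-ing AMDGPU_GEM_CREATE_SHADOW into the creation flags asks
amdgpu_bo_create() to allocate a companion backup BO, reachable through the
parent BO's shadow pointer (the same pointer this patch unreferences in
amdgpu_vm_fini() below). A minimal sketch, assuming the amdgpu_bo_create()
signature visible in the hunks; the wrapper function is illustrative, not
part of the driver:

/*
 * Illustrative sketch, not part of this patch: allocate one VM page
 * table BO and request a shadow backup via AMDGPU_GEM_CREATE_SHADOW,
 * mirroring the amdgpu_bo_create() calls changed below.
 */
static int example_alloc_pt_with_shadow(struct amdgpu_device *adev,
					struct reservation_object *resv,
					struct amdgpu_bo **pt)
{
	int r;

	r = amdgpu_bo_create(adev, AMDGPU_VM_PTE_COUNT * 8,
			     AMDGPU_GPU_PAGE_SIZE, true,
			     AMDGPU_GEM_DOMAIN_VRAM,
			     AMDGPU_GEM_CREATE_NO_CPU_ACCESS |
			     AMDGPU_GEM_CREATE_SHADOW,
			     NULL, resv, pt);
	if (r)
		return r;

	/* On success the backup BO hangs off the parent as (*pt)->shadow. */
	return 0;
}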
@@ -1328,7 +1328,8 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
 		r = amdgpu_bo_create(adev, AMDGPU_VM_PTE_COUNT * 8,
 				     AMDGPU_GPU_PAGE_SIZE, true,
 				     AMDGPU_GEM_DOMAIN_VRAM,
-				     AMDGPU_GEM_CREATE_NO_CPU_ACCESS,
+				     AMDGPU_GEM_CREATE_NO_CPU_ACCESS |
+				     AMDGPU_GEM_CREATE_SHADOW,
 				     NULL, resv, &pt);
 		if (r)
 			goto error_free;
@@ -1527,7 +1528,8 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
 	r = amdgpu_bo_create(adev, pd_size, align, true,
 			     AMDGPU_GEM_DOMAIN_VRAM,
-			     AMDGPU_GEM_CREATE_NO_CPU_ACCESS,
+			     AMDGPU_GEM_CREATE_NO_CPU_ACCESS |
+			     AMDGPU_GEM_CREATE_SHADOW,
 			     NULL, NULL, &vm->page_directory);
 	if (r)
 		goto error_free_sched_entity;
@@ -1583,10 +1585,16 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
 		kfree(mapping);
 	}

-	for (i = 0; i < amdgpu_vm_num_pdes(adev); i++)
+	for (i = 0; i < amdgpu_vm_num_pdes(adev); i++) {
+		if (vm->page_tables[i].entry.robj &&
+		    vm->page_tables[i].entry.robj->shadow)
+			amdgpu_bo_unref(&vm->page_tables[i].entry.robj->shadow);
 		amdgpu_bo_unref(&vm->page_tables[i].entry.robj);
+	}
 	drm_free_large(vm->page_tables);

+	if (vm->page_directory->shadow)
+		amdgpu_bo_unref(&vm->page_directory->shadow);
 	amdgpu_bo_unref(&vm->page_directory);
 	fence_put(vm->page_directory_fence);
 }
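
The restore path itself is not part of this commit; as a hedged illustration
of the recovery flow the shadows enable, a sketch might look like the
following, where example_copy_bo() is a hypothetical placeholder for a DMA
copy helper, not a real amdgpu function:

/*
 * Hypothetical illustration only: after a GPU reset has clobbered
 * VRAM, walk the page tables and copy each one back from its shadow.
 * example_copy_bo() is a stand-in, not part of the amdgpu API.
 */
static int example_copy_bo(struct amdgpu_device *adev,
			   struct amdgpu_bo *src, struct amdgpu_bo *dst)
{
	/* stand-in for a real SDMA/CP copy of src's contents into dst */
	return 0;
}

static int example_restore_page_tables(struct amdgpu_device *adev,
				       struct amdgpu_vm *vm)
{
	unsigned i;
	int r = 0;

	for (i = 0; i < amdgpu_vm_num_pdes(adev); i++) {
		struct amdgpu_bo *bo = vm->page_tables[i].entry.robj;

		if (!bo || !bo->shadow)
			continue;

		r = example_copy_bo(adev, bo->shadow, bo);
		if (r)
			return r;
	}

	if (vm->page_directory->shadow)
		r = example_copy_bo(adev, vm->page_directory->shadow,
				    vm->page_directory);
	return r;
}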